Embrace codespell: config, workflow (to alert when new typos are added) and get typos fixed #2214

Merged: 4 commits, Dec 14, 2023
6 changes: 6 additions & 0 deletions .codespellrc
@@ -0,0 +1,6 @@
# See https://github.com/codespell-project/codespell#using-a-config-file
[codespell]
skip = .git,*.pdf,*.svg,.codespellrc,go.sum,system_registries_v2_test.go,Makefile,build,buildah,buildah.spec,imgtype,copy,AUTHORS,bin,vendor,CHANGELOG.md,changelog.txt,seccomp.json,.cirrus.yml,*.xz,*.gz,*.tar,*.tgz,*ico,*.png,*.1,*.5,*.orig,*.rej,*.gpg
check-hidden = true
ignore-regex = \b(isT|BU|this/doesnt:match)\b
ignore-words-list = te,pathc
2 changes: 1 addition & 1 deletion .github/renovate.json5
@@ -36,7 +36,7 @@
****** Global/general configuration options *****
*************************************************/

// Re-use predefined sets of configuration options to DRY
// Reuse predefined sets of configuration options to DRY
"extends": [
// https://github.com/containers/automation/blob/main/renovate/defaults.json5
"github>containers/automation//renovate/defaults.json5"
23 changes: 23 additions & 0 deletions .github/workflows/codespell.yml
@@ -0,0 +1,23 @@
---
name: Codespell

on:
push:
branches: [main]
pull_request:
branches: [main]

permissions:
contents: read

jobs:
codespell:
name: Check for spelling errors
runs-on: ubuntu-latest

steps:
- name: Checkout
uses: actions/checkout@v3
- name: Codespell
# uses configuration within .codespellrc file
uses: codespell-project/actions-codespell@v2
2 changes: 1 addition & 1 deletion Makefile
@@ -89,4 +89,4 @@ vendor-in-container:
podman run --privileged --rm --env HOME=/root -v `pwd`:/src -w /src golang go mod tidy

codespell:
codespell -S Makefile,build,buildah,buildah.spec,imgtype,copy,AUTHORS,bin,vendor,.git,go.sum,CHANGELOG.md,changelog.txt,seccomp.json,.cirrus.yml,"*.xz,*.gz,*.tar,*.tgz,*ico,*.png,*.1,*.5,*.orig,*.rej" -L keypair,flate,uint,iff,od,ERRO -w
codespell -w
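All of the skip patterns and ignore lists now live in .codespellrc, which is why the Makefile target can shrink to a bare `codespell -w`. Below is an editor's sketch (not part of the PR) of the equivalent local workflow, assuming codespell is installed and the commands are run from the repository root so the config file is picked up automatically:

```sh
# Editor's sketch, not part of the diff. Assumes codespell is installed,
# e.g. via `pip install codespell`, and that you are in the repository root.
codespell        # report typos using the settings from .codespellrc
codespell -w     # same check, but write the fixes in place (what `make codespell` runs)
```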
2 changes: 1 addition & 1 deletion internal/blobinfocache/types.go
@@ -36,7 +36,7 @@ type BlobInfoCache2 interface {
// that could possibly be reused within the specified (transport scope) (if they still
// exist, which is not guaranteed).
//
// If !canSubstitute, the returned cadidates will match the submitted digest exactly; if
// If !canSubstitute, the returned candidates will match the submitted digest exactly; if
// canSubstitute, data from previous RecordDigestUncompressedPair calls is used to also look
// up variants of the blob which have the same uncompressed digest.
//
2 changes: 1 addition & 1 deletion internal/imagedestination/wrapper.go
@@ -28,7 +28,7 @@ type wrapped struct {
//
// NOTE: The returned API MUST NOT be a public interface (it can be either just a struct
// with public methods, or perhaps a private interface), so that we can add methods
// without breaking any external implementors of a public interface.
// without breaking any external implementers of a public interface.
func FromPublic(dest types.ImageDestination) private.ImageDestination {
if dest2, ok := dest.(private.ImageDestination); ok {
return dest2
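The comment above (and the identical one in internal/imagesource below) describes a deliberate pattern: external code may implement only the public interface, while internal callers upgrade values to a private interface that can grow methods freely. A generic, self-contained sketch of that pattern, using hypothetical names rather than the real containers/image types:

```go
// Editor's sketch of the "private interface behind FromPublic" pattern.
// All names here are made up for illustration.
package main

import "fmt"

// Public is the interface external packages are allowed to implement.
type Public interface {
	Name() string
}

// private extends Public with methods that may be added over time without
// breaking external implementers of Public.
type private interface {
	Public
	InternalDetail() string
}

// wrapped adapts any Public value to the private interface with default behavior.
type wrapped struct {
	Public
}

func (w *wrapped) InternalDetail() string { return "default detail for " + w.Name() }

// fromPublic mirrors the FromPublic shape: reuse a native private
// implementation if the value already provides one, otherwise wrap it.
func fromPublic(p Public) private {
	if p2, ok := p.(private); ok {
		return p2
	}
	return &wrapped{Public: p}
}

// external is an outside implementation that only knows about Public.
type external struct{}

func (external) Name() string { return "external" }

func main() {
	fmt.Println(fromPublic(external{}).InternalDetail())
}
```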
2 changes: 1 addition & 1 deletion internal/imagesource/wrapper.go
@@ -27,7 +27,7 @@ type wrapped struct {
//
// NOTE: The returned API MUST NOT be a public interface (it can be either just a struct
// with public methods, or perhaps a private interface), so that we can add methods
// without breaking any external implementors of a public interface.
// without breaking any external implementers of a public interface.
func FromPublic(src types.ImageSource) private.ImageSource {
if src2, ok := src.(private.ImageSource); ok {
return src2
4 changes: 2 additions & 2 deletions internal/manifest/oci_index_test.go
@@ -177,13 +177,13 @@ func TestOCI1IndexChooseInstanceByCompression(t *testing.T) {
{"amd64", "", "sha256:bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb", false},
// out of multiple gzip in arm64 select the first one to ensure original logic is prevented
{"arm64", "", "sha256:cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc", false},
// select a signle gzip s390x image
// select a single gzip s390x image
{"s390x", "", "sha256:eeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee", false},
// out of gzip and zstd in amd64 select the first gzip image
{"amd64", "", "sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", true},
// out of multiple gzip in arm64 select the first one to ensure original logic is prevented
{"arm64", "", "sha256:cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc", true},
// select a signle gzip s390x image
// select a single gzip s390x image
{"s390x", "", "sha256:eeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee", true},
},
unmatchedInstances: []string{
2 changes: 1 addition & 1 deletion pkg/blobinfocache/boltdb/boltdb.go
@@ -411,7 +411,7 @@ func (bdc *cache) candidateLocations(transport types.ImageTransport, scope types
// CandidateLocations returns a prioritized, limited, number of blobs and their locations that could possibly be reused
// within the specified (transport scope) (if they still exist, which is not guaranteed).
//
// If !canSubstitute, the returned cadidates will match the submitted digest exactly; if canSubstitute,
// If !canSubstitute, the returned candidates will match the submitted digest exactly; if canSubstitute,
// data from previous RecordDigestUncompressedPair calls is used to also look up variants of the blob which have the same
// uncompressed digest.
func (bdc *cache) CandidateLocations(transport types.ImageTransport, scope types.BICTransportScope, primaryDigest digest.Digest, canSubstitute bool) []types.BICReplacementCandidate {
4 changes: 2 additions & 2 deletions pkg/blobinfocache/internal/prioritize/prioritize.go
@@ -91,11 +91,11 @@ func min(a, b int) int {

// destructivelyPrioritizeReplacementCandidatesWithMax is destructivelyPrioritizeReplacementCandidates with parameters for the
// number of entries to limit for known and unknown location separately, only to make testing simpler.
// TODO: following function is not destructive any more in the nature instead priortized result is actually copies of the original
// TODO: following function is not destructive any more in the nature instead prioritized result is actually copies of the original
// candidate set, so In future we might wanna re-name this public API and remove the destructive prefix.
func destructivelyPrioritizeReplacementCandidatesWithMax(cs []CandidateWithTime, primaryDigest, uncompressedDigest digest.Digest, totalLimit int, noLocationLimit int) []blobinfocache.BICReplacementCandidate2 {
// split unknown candidates and known candidates
// and limit them seperately.
// and limit them separately.
var knownLocationCandidates []CandidateWithTime
var unknownLocationCandidates []CandidateWithTime
// We don't need to use sort.Stable() because nanosecond timestamps are (presumably?) unique, so no two elements should
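The comments in this hunk describe splitting candidates with known locations from those without and limiting each group separately. A minimal, generic sketch of that idea, with hypothetical types (the real function also sorts candidates by timestamp, which is omitted here):

```go
// Editor's sketch, not the real prioritize code; types and limits are made up.
package main

import "fmt"

type candidate struct {
	digest        string
	locationKnown bool
}

// limitSeparately keeps at most totalLimit candidates overall and at most
// noLocationLimit of the ones whose location is unknown.
func limitSeparately(cs []candidate, totalLimit, noLocationLimit int) []candidate {
	var known, unknown []candidate
	for _, c := range cs {
		if c.locationKnown {
			known = append(known, c)
		} else {
			unknown = append(unknown, c)
		}
	}
	if len(unknown) > noLocationLimit {
		unknown = unknown[:noLocationLimit]
	}
	out := append(known, unknown...)
	if len(out) > totalLimit {
		out = out[:totalLimit]
	}
	return out
}

func main() {
	cs := []candidate{{"a", true}, {"b", false}, {"c", false}, {"d", true}}
	fmt.Println(limitSeparately(cs, 3, 1))
}
```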
2 changes: 1 addition & 1 deletion pkg/blobinfocache/memory/memory.go
@@ -184,7 +184,7 @@ func (mem *cache) CandidateLocations(transport types.ImageTransport, scope types
// CandidateLocations2 returns a prioritized, limited, number of blobs and their locations (if known) that could possibly be reused
// within the specified (transport scope) (if they still exist, which is not guaranteed).
//
// If !canSubstitute, the returned cadidates will match the submitted digest exactly; if canSubstitute,
// If !canSubstitute, the returned candidates will match the submitted digest exactly; if canSubstitute,
// data from previous RecordDigestUncompressedPair calls is used to also look up variants of the blob which have the same
// uncompressed digest.
func (mem *cache) CandidateLocations2(transport types.ImageTransport, scope types.BICTransportScope, primaryDigest digest.Digest, canSubstitute bool) []blobinfocache.BICReplacementCandidate2 {
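The canSubstitute comment corrected here (and in the boltdb, sqlite and internal-interface hunks) is easiest to see with the in-memory cache. A small editor's sketch, not part of the PR, with made-up digests, scope and location values:

```go
// Editor's sketch of the canSubstitute semantics using the in-memory cache.
package main

import (
	"fmt"

	"github.com/containers/image/v5/docker"
	"github.com/containers/image/v5/pkg/blobinfocache/memory"
	"github.com/containers/image/v5/types"
	"github.com/opencontainers/go-digest"
)

func main() {
	cache := memory.New() // a types.BlobInfoCache that keeps everything in memory

	gzipped := digest.Digest("sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa")
	uncompressed := digest.Digest("sha256:bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb")
	scope := types.BICTransportScope{Opaque: "docker.io/library/example"}

	// Teach the cache that both digests carry the same uncompressed content,
	// and that a copy of the uncompressed variant is known to exist somewhere.
	cache.RecordDigestUncompressedPair(gzipped, uncompressed)
	cache.RecordDigestUncompressedPair(uncompressed, uncompressed)
	cache.RecordKnownLocation(docker.Transport, scope, uncompressed,
		types.BICLocationReference{Opaque: "example-location"})

	// canSubstitute == false: only exact matches for `gzipped` qualify, and none
	// were recorded, so this should come back empty.
	exact := cache.CandidateLocations(docker.Transport, scope, gzipped, false)

	// canSubstitute == true: variants sharing the same uncompressed digest may be
	// offered as substitutes, so the `uncompressed` location can show up here.
	substitutable := cache.CandidateLocations(docker.Transport, scope, gzipped, true)

	fmt.Printf("exact: %d candidates, with substitution: %d candidates\n",
		len(exact), len(substitutable))
}
```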
4 changes: 2 additions & 2 deletions pkg/blobinfocache/sqlite/sqlite.go
@@ -171,7 +171,7 @@ func transaction[T any](sqc *cache, fn func(tx *sql.Tx) (T, error)) (T, error) {

// dbTransaction calls fn within a read-write transaction in db.
func dbTransaction[T any](db *sql.DB, fn func(tx *sql.Tx) (T, error)) (T, error) {
// Ideally we should be able to distinguish between read-only and read-write transactions, see the _txlock=exclusive dicussion.
// Ideally we should be able to distinguish between read-only and read-write transactions, see the _txlock=exclusive discussion.

var zeroRes T // A zero value of T

@@ -496,7 +496,7 @@ func (sqc *cache) appendReplacementCandidates(candidates []prioritize.CandidateW
// that could possibly be reused within the specified (transport scope) (if they still
// exist, which is not guaranteed).
//
// If !canSubstitute, the returned cadidates will match the submitted digest exactly; if
// If !canSubstitute, the returned candidates will match the submitted digest exactly; if
// canSubstitute, data from previous RecordDigestUncompressedPair calls is used to also look
// up variants of the blob which have the same uncompressed digest.
//
2 changes: 1 addition & 1 deletion signature/fulcio_cert_stub.go
@@ -23,6 +23,6 @@ func (f *fulcioTrustRoot) validate() error {
func verifyRekorFulcio(rekorPublicKey *ecdsa.PublicKey, fulcioTrustRoot *fulcioTrustRoot, untrustedRekorSET []byte,
untrustedCertificateBytes []byte, untrustedIntermediateChainBytes []byte, untrustedBase64Signature string,
untrustedPayloadBytes []byte) (crypto.PublicKey, error) {
return nil, errors.New("fulcio diabled at compile-time")
return nil, errors.New("fulcio disabled at compile-time")

}
2 changes: 1 addition & 1 deletion signature/simplesigning/signer_test.go
@@ -103,7 +103,7 @@ func TestSimpleSignerSignImageManifest(t *testing.T) {

// Failures to sign need to be tested in two parts: First the failures that involve the wrong passphrase, then failures that
// should manifest even with a valid passphrase or unlocked key (because the GPG agent is caching unlocked keys).
// Alternatively, we could be caling gpgagent.KillGPGAgent() all the time...
// Alternatively, we could be calling gpgagent.KillGPGAgent() all the time...
type failingCase struct {
name string
opts []Option
2 changes: 1 addition & 1 deletion storage/storage_src.go
@@ -118,7 +118,7 @@ func (s *storageImageSource) GetBlob(ctx context.Context, info types.BlobInfo, c

var layers []storage.Layer

// If the digest was overriden by LayerInfosForCopy, then we need to use the TOC digest
// If the digest was overridden by LayerInfosForCopy, then we need to use the TOC digest
// to retrieve it from the storage.
s.getBlobMutex.Lock()
layerID, found := s.getBlobMutexProtected.digestToLayerID[digest]
2 changes: 1 addition & 1 deletion storage/storage_test.go
@@ -207,7 +207,7 @@ func makeLayerGoroutine(pwriter io.Writer, uncompressedCount *int64, compression
// We do not do that, to workaround https://github.com/containers/storage/issues/1729 :
// tar-split runs a goroutine that consumes/forwards tar content and might access
// concurrently-freed objects if it sees a valid EOF marker.
// Instead, realy on raw EOF to terminate the goroutine.
// Instead, really on raw EOF to terminate the goroutine.
// This depends on implementation details of tar.Writer (that it does not do any
// internal buffering).
