From 75c3b33899954e7a0925426b38fd1084d521c3a0 Mon Sep 17 00:00:00 2001 From: "dependabot-preview[bot]" <27856297+dependabot-preview[bot]@users.noreply.github.com> Date: Thu, 28 Jan 2021 09:17:40 +0000 Subject: Bump github.com/containers/image/v5 from 5.9.0 to 5.10.0 Bumps [github.com/containers/image/v5](https://github.com/containers/image) from 5.9.0 to 5.10.0. - [Release notes](https://github.com/containers/image/releases) - [Commits](https://github.com/containers/image/compare/v5.9.0...v5.10.0) Signed-off-by: dependabot-preview[bot] Signed-off-by: Daniel J Walsh --- vendor/github.com/containers/image/v5/copy/copy.go | 110 +++++--- .../image/v5/directory/directory_dest.go | 7 +- .../containers/image/v5/docker/docker_client.go | 30 ++- .../image/v5/docker/docker_image_dest.go | 29 +- .../containers/image/v5/docker/docker_image_src.go | 5 + .../image/v5/docker/internal/tarfile/dest.go | 4 +- .../containers/image/v5/docker/lookaside.go | 8 +- .../image/v5/docker/policyconfiguration/naming.go | 21 ++ .../containers/image/v5/docker/tarfile/dest.go | 4 +- .../containers/image/v5/image/docker_schema2.go | 3 + vendor/github.com/containers/image/v5/image/oci.go | 4 + .../v5/internal/blobinfocache/blobinfocache.go | 63 +++++ .../image/v5/internal/blobinfocache/types.go | 45 ++++ .../containers/image/v5/manifest/common.go | 38 ++- .../containers/image/v5/manifest/docker_schema2.go | 2 + .../github.com/containers/image/v5/manifest/oci.go | 2 + .../containers/image/v5/oci/archive/oci_dest.go | 4 +- .../containers/image/v5/oci/layout/oci_dest.go | 5 +- .../containers/image/v5/openshift/openshift.go | 4 +- .../containers/image/v5/ostree/ostree_dest.go | 4 +- .../image/v5/pkg/blobinfocache/boltdb/boltdb.go | 85 +++++- .../internal/prioritize/prioritize.go | 12 +- .../image/v5/pkg/blobinfocache/memory/memory.go | 55 +++- .../image/v5/pkg/blobinfocache/none/none.go | 3 +- .../image/v5/pkg/docker/config/config.go | 36 ++- .../image/v5/pkg/shortnames/shortnames.go | 14 +- .../v5/pkg/sysregistriesv2/system_registries_v2.go | 10 +- .../containers/image/v5/signature/policy_config.go | 8 +- .../containers/image/v5/storage/storage_image.go | 4 +- .../github.com/containers/image/v5/tarball/doc.go | 17 +- .../github.com/containers/image/v5/types/types.go | 25 +- .../containers/image/v5/version/version.go | 2 +- .../klauspost/compress/flate/gen_inflate.go | 294 --------------------- .../github.com/klauspost/compress/huff0/README.md | 4 +- .../github.com/klauspost/compress/zstd/decoder.go | 7 +- vendor/github.com/klauspost/compress/zstd/zstd.go | 8 + vendor/github.com/ulikunitz/xz/SECURITY.md | 10 + vendor/github.com/ulikunitz/xz/TODO.md | 108 ++++---- vendor/github.com/ulikunitz/xz/format.go | 22 +- vendor/github.com/ulikunitz/xz/lzma/bintree.go | 5 +- vendor/github.com/ulikunitz/xz/lzma/bitops.go | 2 + vendor/github.com/ulikunitz/xz/lzma/decoder.go | 2 +- vendor/github.com/ulikunitz/xz/lzma/decoderdict.go | 7 - vendor/github.com/ulikunitz/xz/lzma/directcodec.go | 11 - vendor/github.com/ulikunitz/xz/lzma/distcodec.go | 16 -- vendor/github.com/ulikunitz/xz/lzma/encoderdict.go | 2 +- vendor/github.com/ulikunitz/xz/lzma/header2.go | 2 +- vendor/github.com/ulikunitz/xz/lzma/lengthcodec.go | 13 - .../github.com/ulikunitz/xz/lzma/literalcodec.go | 7 - vendor/github.com/ulikunitz/xz/lzma/operation.go | 25 -- vendor/github.com/ulikunitz/xz/lzma/rangecodec.go | 26 -- vendor/github.com/ulikunitz/xz/lzma/reader2.go | 1 - vendor/github.com/ulikunitz/xz/lzma/state.go | 6 - vendor/github.com/ulikunitz/xz/reader.go | 15 -- 
vendor/github.com/ulikunitz/xz/writer.go | 4 + vendor/github.com/vbauerster/mpb/v5/.travis.yml | 3 + vendor/github.com/vbauerster/mpb/v5/bar_option.go | 11 +- vendor/github.com/vbauerster/mpb/v5/go.mod | 2 +- vendor/github.com/vbauerster/mpb/v5/go.sum | 4 +- vendor/github.com/vbauerster/mpb/v5/progress.go | 48 ++-- vendor/modules.txt | 9 +- 61 files changed, 683 insertions(+), 654 deletions(-) create mode 100644 vendor/github.com/containers/image/v5/internal/blobinfocache/blobinfocache.go create mode 100644 vendor/github.com/containers/image/v5/internal/blobinfocache/types.go delete mode 100644 vendor/github.com/klauspost/compress/flate/gen_inflate.go create mode 100644 vendor/github.com/ulikunitz/xz/SECURITY.md diff --git a/vendor/github.com/containers/image/v5/copy/copy.go b/vendor/github.com/containers/image/v5/copy/copy.go index 485db4d30..b5c755e18 100644 --- a/vendor/github.com/containers/image/v5/copy/copy.go +++ b/vendor/github.com/containers/image/v5/copy/copy.go @@ -14,6 +14,7 @@ import ( "github.com/containers/image/v5/docker/reference" "github.com/containers/image/v5/image" + internalblobinfocache "github.com/containers/image/v5/internal/blobinfocache" "github.com/containers/image/v5/internal/pkg/platform" "github.com/containers/image/v5/manifest" "github.com/containers/image/v5/pkg/blobinfocache" @@ -47,7 +48,7 @@ var ( // maxParallelDownloads is used to limit the maximum number of parallel // downloads. Let's follow Firefox by limiting it to 6. - maxParallelDownloads = 6 + maxParallelDownloads = uint(6) ) // compressionBufferSize is the buffer size used to compress a blob @@ -107,18 +108,19 @@ func (d *digestingReader) Read(p []byte) (int, error) { // copier allows us to keep track of diffID values for blobs, and other // data shared across one or more images in a possible manifest list. type copier struct { - dest types.ImageDestination - rawSource types.ImageSource - reportWriter io.Writer - progressOutput io.Writer - progressInterval time.Duration - progress chan types.ProgressProperties - blobInfoCache types.BlobInfoCache - copyInParallel bool - compressionFormat compression.Algorithm - compressionLevel *int - ociDecryptConfig *encconfig.DecryptConfig - ociEncryptConfig *encconfig.EncryptConfig + dest types.ImageDestination + rawSource types.ImageSource + reportWriter io.Writer + progressOutput io.Writer + progressInterval time.Duration + progress chan types.ProgressProperties + blobInfoCache internalblobinfocache.BlobInfoCache2 + copyInParallel bool + compressionFormat compression.Algorithm + compressionLevel *int + ociDecryptConfig *encconfig.DecryptConfig + ociEncryptConfig *encconfig.EncryptConfig + maxParallelDownloads uint } // imageCopier tracks state specific to a single image (possibly an item of a manifest list) @@ -190,6 +192,8 @@ type Options struct { // OciDecryptConfig contains the config that can be used to decrypt an image if it is // encrypted if non-nil. If nil, it does not attempt to decrypt an image. OciDecryptConfig *encconfig.DecryptConfig + // MaxParallelDownloads indicates the maximum number of layers to pull at the same time. A reasonable default is used if this is left as 0. + MaxParallelDownloads uint } // validateImageListSelection returns an error if the passed-in value is not one that we recognize as a valid ImageListSelection value @@ -265,9 +269,10 @@ func Image(ctx context.Context, policyContext *signature.PolicyContext, destRef, // FIXME?
The cache is used for sources and destinations equally, but we only have a SourceCtx and DestinationCtx. // For now, use DestinationCtx (because blob reuse changes the behavior of the destination side more); eventually // we might want to add a separate CommonCtx — or would that be too confusing? - blobInfoCache: blobinfocache.DefaultCache(options.DestinationCtx), - ociDecryptConfig: options.OciDecryptConfig, - ociEncryptConfig: options.OciEncryptConfig, + blobInfoCache: internalblobinfocache.FromBlobInfoCache(blobinfocache.DefaultCache(options.DestinationCtx)), + ociDecryptConfig: options.OciDecryptConfig, + ociEncryptConfig: options.OciEncryptConfig, + maxParallelDownloads: options.MaxParallelDownloads, } // Default to using gzip compression unless specified otherwise. if options.DestinationCtx == nil || options.DestinationCtx.CompressionFormat == nil { @@ -648,13 +653,19 @@ func (c *copier) copyOneImage(ctx context.Context, policyContext *signature.Poli // With docker/distribution registries we do not know whether the registry accepts schema2 or schema1 only; // and at least with the OpenShift registry "acceptschema2" option, there is no way to detect the support // without actually trying to upload something and getting a types.ManifestTypeRejectedError. - // So, try the preferred manifest MIME type. If the process succeeds, fine… + // So, try the preferred manifest MIME type with possibly-updated blob digests, media types, and sizes if + // we're altering how they're compressed. If the process succeeds, fine… manifestBytes, retManifestDigest, err := ic.copyUpdatedConfigAndManifest(ctx, targetInstance) retManifestType = preferredManifestMIMEType if err != nil { logrus.Debugf("Writing manifest using preferred type %s failed: %v", preferredManifestMIMEType, err) - // … if it fails, _and_ the failure is because the manifest is rejected, we may have other options. - if _, isManifestRejected := errors.Cause(err).(types.ManifestTypeRejectedError); !isManifestRejected || len(otherManifestMIMETypeCandidates) == 0 { + // … if it fails, and the failure is either because the manifest is rejected by the registry, or + // because we failed to create a manifest of the specified type because the specific manifest type + // doesn't support the type of compression we're trying to use (e.g. docker v2s2 and zstd), we may + // have other options available that could still succeed. + _, isManifestRejected := errors.Cause(err).(types.ManifestTypeRejectedError) + _, isCompressionIncompatible := errors.Cause(err).(manifest.ManifestLayerCompressionIncompatibilityError) + if (!isManifestRejected && !isCompressionIncompatible) || len(otherManifestMIMETypeCandidates) == 0 { // We don’t have other options. // In principle the code below would handle this as well, but the resulting error message is fairly ugly. // Don’t bother the user with MIME types if we have no choice. @@ -809,7 +820,11 @@ func (ic *imageCopier) copyLayers(ctx context.Context) error { // avoid malicious images causing troubles and to be nice to servers. 
var copySemaphore *semaphore.Weighted if ic.c.copyInParallel { - copySemaphore = semaphore.NewWeighted(int64(maxParallelDownloads)) + max := ic.c.maxParallelDownloads + if max == 0 { + max = maxParallelDownloads + } + copySemaphore = semaphore.NewWeighted(int64(max)) } else { copySemaphore = semaphore.NewWeighted(int64(1)) } @@ -896,7 +911,7 @@ func (ic *imageCopier) copyLayers(ctx context.Context) error { return nil } -// layerDigestsDiffer return true iff the digests in a and b differ (ignoring sizes and possible other fields) +// layerDigestsDiffer returns true iff the digests in a and b differ (ignoring sizes and possible other fields) func layerDigestsDiffer(a, b []types.BlobInfo) bool { if len(a) != len(b) { return true @@ -951,7 +966,7 @@ func (ic *imageCopier) copyUpdatedConfigAndManifest(ctx context.Context, instanc instanceDigest = &manifestDigest } if err := ic.c.dest.PutManifest(ctx, man, instanceDigest); err != nil { - return nil, "", errors.Wrap(err, "Error writing manifest") + return nil, "", errors.Wrapf(err, "Error writing manifest %q", string(man)) } return man, manifestDigest, nil } @@ -1049,7 +1064,7 @@ type diffIDResult struct { err error } -// copyLayer copies a layer with srcInfo (with known Digest and Annotations and possibly known Size) in src to dest, perhaps compressing it if canCompress, +// copyLayer copies a layer with srcInfo (with known Digest and Annotations and possibly known Size) in src to dest, perhaps (de/re/)compressing it, // and returns a complete blobInfo of the copied layer, and a value for LayerDiffIDs if diffIDIsNeeded func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, toEncrypt bool, pool *mpb.Progress) (types.BlobInfo, digest.Digest, error) { cachedDiffID := ic.c.blobInfoCache.UncompressedDigest(srcInfo.Digest) // May be "" @@ -1058,6 +1073,12 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, to // If we already have the blob, and we don't need to compute the diffID, then we don't need to read it from the source. if !diffIDIsNeeded { + // TODO: at this point we don't know whether or not a blob we end up reusing is compressed using an algorithm + // that is acceptable for use on layers in the manifest that we'll be writing later, so if we end up reusing + // a blob that's compressed with e.g. zstd, but we're only allowed to write a v2s2 manifest, this will cause + // a failure when we eventually try to update the manifest with the digest and MIME type of the reused blob. + // Fixing that will probably require passing more information to TryReusingBlob() than the current version of + // the ImageDestination interface lets us pass in. reused, blobInfo, err := ic.c.dest.TryReusingBlob(ctx, srcInfo, ic.c.blobInfoCache, ic.canSubstituteBlobs) if err != nil { return types.BlobInfo{}, "", errors.Wrapf(err, "Error trying to reuse blob %s at destination", srcInfo.Digest) @@ -1115,7 +1136,7 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, to // copyLayerFromStream is an implementation detail of copyLayer; mostly providing a separate “defer” scope. // it copies a blob with srcInfo (with known Digest and Annotations and possibly known Size) from srcStream to dest, -// perhaps compressing the stream if canCompress, +// perhaps (de/re/)compressing the stream, // and returns a complete blobInfo of the copied blob and perhaps a <-chan diffIDResult if diffIDIsNeeded, to be read by the caller. 
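The new knob flows from Options through copier.maxParallelDownloads into the semaphore above. For illustration, a minimal caller sketch (not part of the vendored change; ctx, policyContext, srcRef, and destRef are assumed to be set up elsewhere):

func pullWithLimit(ctx context.Context, policyContext *signature.PolicyContext, destRef, srcRef types.ImageReference) ([]byte, error) {
	// Cap parallel layer pulls at 3; leaving MaxParallelDownloads at 0
	// keeps the library default of 6 defined in copy.go above.
	return copy.Image(ctx, policyContext, destRef, srcRef, &copy.Options{
		MaxParallelDownloads: 3,
	})
}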
func (ic *imageCopier) copyLayerFromStream(ctx context.Context, srcStream io.Reader, srcInfo types.BlobInfo, diffIDIsNeeded bool, toEncrypt bool, bar *mpb.Bar) (types.BlobInfo, <-chan diffIDResult, error) { @@ -1191,11 +1212,15 @@ func (r errorAnnotationReader) Read(b []byte) (n int, err error) { // copyBlobFromStream copies a blob with srcInfo (with known Digest and Annotations and possibly known Size) from srcStream to dest, // perhaps sending a copy to an io.Writer if getOriginalLayerCopyWriter != nil, -// perhaps compressing it if canCompress, +// perhaps (de/re/)compressing it if canModifyBlob, // and returns a complete blobInfo of the copied blob. func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, srcInfo types.BlobInfo, getOriginalLayerCopyWriter func(decompressor compression.DecompressorFunc) io.Writer, canModifyBlob bool, isConfig bool, toEncrypt bool, bar *mpb.Bar) (types.BlobInfo, error) { + if isConfig { // This is guaranteed by the caller, but set it here to be explicit. + canModifyBlob = false + } + // The copying happens through a pipeline of connected io.Readers. // === Input: srcStream @@ -1253,16 +1278,23 @@ func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, sr originalLayerReader = destStream } - desiredCompressionFormat := c.compressionFormat - // === Deal with layer compression/decompression if necessary var inputInfo types.BlobInfo var compressionOperation types.LayerCompression + uploadCompressionFormat := &c.compressionFormat + srcCompressorName := internalblobinfocache.Uncompressed + if isCompressed { + srcCompressorName = compressionFormat.Name() + } + var uploadCompressorName string if canModifyBlob && isOciEncrypted(srcInfo.MediaType) { // PreserveOriginal due to any compression not being able to be done on an encrypted blob unless decrypted logrus.Debugf("Using original blob without modification for encrypted blob") compressionOperation = types.PreserveOriginal inputInfo = srcInfo + srcCompressorName = internalblobinfocache.UnknownCompression + uploadCompressorName = internalblobinfocache.UnknownCompression + uploadCompressionFormat = nil } else if canModifyBlob && c.dest.DesiredLayerCompression() == types.Compress && !isCompressed { logrus.Debugf("Compressing blob on the fly") compressionOperation = types.Compress @@ -1272,11 +1304,12 @@ func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, sr // If this fails while writing data, it will do pipeWriter.CloseWithError(); if it fails otherwise, // e.g. because we have exited and due to pipeReader.Close() above further writing to the pipe has failed, // we don’t care. - go c.compressGoroutine(pipeWriter, destStream, desiredCompressionFormat) // Closes pipeWriter + go c.compressGoroutine(pipeWriter, destStream, *uploadCompressionFormat) // Closes pipeWriter destStream = pipeReader inputInfo.Digest = "" inputInfo.Size = -1 - } else if canModifyBlob && c.dest.DesiredLayerCompression() == types.Compress && isCompressed && desiredCompressionFormat.Name() != compressionFormat.Name() { + uploadCompressorName = uploadCompressionFormat.Name() + } else if canModifyBlob && c.dest.DesiredLayerCompression() == types.Compress && isCompressed && uploadCompressionFormat.Name() != compressionFormat.Name() { // When the blob is compressed, but the desired format is different, it first needs to be decompressed and finally // re-compressed using the desired format. 
logrus.Debugf("Blob will be converted") @@ -1291,11 +1324,12 @@ func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, sr pipeReader, pipeWriter := io.Pipe() defer pipeReader.Close() - go c.compressGoroutine(pipeWriter, s, desiredCompressionFormat) // Closes pipeWriter + go c.compressGoroutine(pipeWriter, s, *uploadCompressionFormat) // Closes pipeWriter destStream = pipeReader inputInfo.Digest = "" inputInfo.Size = -1 + uploadCompressorName = uploadCompressionFormat.Name() } else if canModifyBlob && c.dest.DesiredLayerCompression() == types.Decompress && isCompressed { logrus.Debugf("Blob will be decompressed") compressionOperation = types.Decompress @@ -1307,11 +1341,15 @@ func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, sr destStream = s inputInfo.Digest = "" inputInfo.Size = -1 + uploadCompressorName = internalblobinfocache.Uncompressed + uploadCompressionFormat = nil } else { // PreserveOriginal might also need to recompress the original blob if the desired compression format is different. logrus.Debugf("Using original blob without modification") compressionOperation = types.PreserveOriginal inputInfo = srcInfo + uploadCompressorName = srcCompressorName + uploadCompressionFormat = nil } // Perform image encryption for valid mediatypes if ociEncryptConfig provided @@ -1371,9 +1409,7 @@ func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, sr uploadedInfo.CompressionOperation = compressionOperation // If we can modify the layer's blob, set the desired algorithm for it to be set in the manifest. - if canModifyBlob && !isConfig { - uploadedInfo.CompressionAlgorithm = &desiredCompressionFormat - } + uploadedInfo.CompressionAlgorithm = uploadCompressionFormat if decrypted { uploadedInfo.CryptoOperation = types.Decrypt } else if encrypted { @@ -1390,7 +1426,7 @@ func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, sr } } - // This is fairly horrible: the writer from getOriginalLayerCopyWriter wants to consumer + // This is fairly horrible: the writer from getOriginalLayerCopyWriter wants to consume // all of the input (to compute DiffIDs), even if dest.PutBlob does not need it. // So, read everything from originalLayerReader, which will cause the rest to be // sent there if we are not already at EOF. @@ -1423,6 +1459,12 @@ func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, sr default: return types.BlobInfo{}, errors.Errorf("Internal error: Unexpected compressionOperation value %#v", compressionOperation) } + if uploadCompressorName != "" && uploadCompressorName != internalblobinfocache.UnknownCompression { + c.blobInfoCache.RecordDigestCompressorName(uploadedInfo.Digest, uploadCompressorName) + } + if srcInfo.Digest != "" && srcCompressorName != "" && srcCompressorName != internalblobinfocache.UnknownCompression { + c.blobInfoCache.RecordDigestCompressorName(srcInfo.Digest, srcCompressorName) + } } return uploadedInfo, nil } diff --git a/vendor/github.com/containers/image/v5/directory/directory_dest.go b/vendor/github.com/containers/image/v5/directory/directory_dest.go index 2b81c8360..5cafd2674 100644 --- a/vendor/github.com/containers/image/v5/directory/directory_dest.go +++ b/vendor/github.com/containers/image/v5/directory/directory_dest.go @@ -194,7 +194,9 @@ func (d *dirImageDestination) PutBlob(ctx context.Context, stream io.Reader, inp // (e.g. 
if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree). // info.Digest must not be empty. // If canSubstitute, TryReusingBlob can use an equivalent of the desired blob; in that case the returned info may not match the input. -// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size. +// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size, and may +// include CompressionOperation and CompressionAlgorithm fields to indicate that a change to the compression type should be +// reflected in the manifest that will be written. // If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure. // May use and/or update cache. func (d *dirImageDestination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) { @@ -210,7 +212,6 @@ func (d *dirImageDestination) TryReusingBlo return false, types.BlobInfo{}, err } return true, types.BlobInfo{Digest: info.Digest, Size: finfo.Size()}, nil - } // PutManifest writes manifest to the destination. @@ -251,7 +252,7 @@ func pathExists(path string) (bool, error) { if err == nil { return true, nil } - if err != nil && os.IsNotExist(err) { + if os.IsNotExist(err) { return false, nil } return false, err diff --git a/vendor/github.com/containers/image/v5/docker/docker_client.go b/vendor/github.com/containers/image/v5/docker/docker_client.go index 797be45a2..be46508de 100644 --- a/vendor/github.com/containers/image/v5/docker/docker_client.go +++ b/vendor/github.com/containers/image/v5/docker/docker_client.go @@ -23,6 +23,7 @@ import ( "github.com/containers/image/v5/pkg/sysregistriesv2" "github.com/containers/image/v5/pkg/tlsclientconfig" "github.com/containers/image/v5/types" + "github.com/containers/image/v5/version" "github.com/containers/storage/pkg/homedir" clientLib "github.com/docker/distribution/registry/client" "github.com/docker/go-connections/tlsconfig" @@ -65,6 +66,8 @@ var ( {path: "/etc/containers/certs.d", absolute: true}, {path: "/etc/docker/certs.d", absolute: true}, } + + defaultUserAgent = "containers/" + version.Version + " (github.com/containers/image)" ) // extensionSignature and extensionSignatureList come from github.com/openshift/origin/pkg/dockerregistry/server/signaturedispatcher.go: @@ -92,8 +95,9 @@ type bearerToken struct { // dockerClient is configuration for dealing with a single Docker registry. type dockerClient struct { // The following members are set by newDockerClient and do not change afterwards. - sys *types.SystemContext - registry string + sys *types.SystemContext + registry string + userAgent string // tlsClientConfig is set up by newDockerClient and will be used and updated // by detectProperties(). Callers can edit tlsClientConfig.InsecureSkipVerify in the meantime.
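With defaultUserAgent in place, every request now carries a User-Agent header; the per-call-site nil checks removed below collapse into a one-time selection in newDockerClient. The effective logic, restated as a standalone sketch (a hypothetical helper, equivalent to the wiring in this patch):

func effectiveUserAgent(sys *types.SystemContext) string {
	if sys != nil && sys.DockerRegistryUserAgent != "" {
		return sys.DockerRegistryUserAgent // caller-specified override
	}
	return defaultUserAgent // "containers/" + version.Version + " (github.com/containers/image)"
}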
@@ -200,9 +204,7 @@ func dockerCertDir(sys *types.SystemContext, hostPort string) (string, error) { logrus.Debugf("error accessing certs directory due to permissions: %v", err) continue } - if err != nil { - return "", err - } + return "", err } return fullCertDirPath, nil } @@ -277,9 +279,15 @@ func newDockerClient(sys *types.SystemContext, registry, reference string) (*doc } tlsClientConfig.InsecureSkipVerify = skipVerify + userAgent := defaultUserAgent + if sys != nil && sys.DockerRegistryUserAgent != "" { + userAgent = sys.DockerRegistryUserAgent + } + return &dockerClient{ sys: sys, registry: registry, + userAgent: userAgent, tlsClientConfig: tlsClientConfig, }, nil } @@ -529,9 +537,7 @@ func (c *dockerClient) makeRequestToResolvedURLOnce(ctx context.Context, method, req.Header.Add(n, hh) } } - if c.sys != nil && c.sys.DockerRegistryUserAgent != "" { - req.Header.Add("User-Agent", c.sys.DockerRegistryUserAgent) - } + req.Header.Add("User-Agent", c.userAgent) if auth == v2Auth { if err := c.setupRequestAuth(req, extraScope); err != nil { return nil, err } @@ -637,9 +643,7 @@ func (c *dockerClient) getBearerTokenOAuth2(ctx context.Context, challenge chall params.Add("client_id", "containers/image") authReq.Body = ioutil.NopCloser(bytes.NewBufferString(params.Encode())) - if c.sys != nil && c.sys.DockerRegistryUserAgent != "" { - authReq.Header.Add("User-Agent", c.sys.DockerRegistryUserAgent) - } + authReq.Header.Add("User-Agent", c.userAgent) authReq.Header.Add("Content-Type", "application/x-www-form-urlencoded") logrus.Debugf("%s %s", authReq.Method, authReq.URL.String()) res, err := c.client.Do(authReq) @@ -692,9 +696,7 @@ func (c *dockerClient) getBearerToken(ctx context.Context, challenge challenge, if c.auth.Username != "" && c.auth.Password != "" { authReq.SetBasicAuth(c.auth.Username, c.auth.Password) } - if c.sys != nil && c.sys.DockerRegistryUserAgent != "" { - authReq.Header.Add("User-Agent", c.sys.DockerRegistryUserAgent) - } + authReq.Header.Add("User-Agent", c.userAgent) logrus.Debugf("%s %s", authReq.Method, authReq.URL.String()) res, err := c.client.Do(authReq) diff --git a/vendor/github.com/containers/image/v5/docker/docker_image_dest.go b/vendor/github.com/containers/image/v5/docker/docker_image_dest.go index ac63ac121..842dcfba6 100644 --- a/vendor/github.com/containers/image/v5/docker/docker_image_dest.go +++ b/vendor/github.com/containers/image/v5/docker/docker_image_dest.go @@ -15,6 +15,7 @@ import ( "strings" "github.com/containers/image/v5/docker/reference" + "github.com/containers/image/v5/internal/blobinfocache" "github.com/containers/image/v5/internal/iolimits" "github.com/containers/image/v5/internal/uploadreader" "github.com/containers/image/v5/manifest" @@ -284,7 +285,9 @@ func (d *dockerImageDestination) mountBlob(ctx context.Context, srcRepo referenc // (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree). // info.Digest must not be empty. // If canSubstitute, TryReusingBlob can use an equivalent of the desired blob; in that case the returned info may not match the input. -// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size.
+// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size, and may +// include CompressionOperation and CompressionAlgorithm fields to indicate that a change to the compression type should be +// reflected in the manifest that will be written. // If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure. // May use and/or update cache. func (d *dockerImageDestination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) { @@ -299,17 +302,23 @@ func (d *dockerImageDestination) TryReusingBlob(ctx context.Context, info types. } if exists { cache.RecordKnownLocation(d.ref.Transport(), bicTransportScope(d.ref), info.Digest, newBICLocationReference(d.ref)) - return true, types.BlobInfo{Digest: info.Digest, Size: size}, nil + return true, types.BlobInfo{Digest: info.Digest, MediaType: info.MediaType, Size: size}, nil } // Then try reusing blobs from other locations. - for _, candidate := range cache.CandidateLocations(d.ref.Transport(), bicTransportScope(d.ref), info.Digest, canSubstitute) { + bic := blobinfocache.FromBlobInfoCache(cache) + candidates := bic.CandidateLocations2(d.ref.Transport(), bicTransportScope(d.ref), info.Digest, canSubstitute) + for _, candidate := range candidates { candidateRepo, err := parseBICLocationReference(candidate.Location) if err != nil { logrus.Debugf("Error parsing BlobInfoCache location reference: %s", err) continue } - logrus.Debugf("Trying to reuse cached location %s in %s", candidate.Digest.String(), candidateRepo.Name()) + if candidate.CompressorName != blobinfocache.Uncompressed { + logrus.Debugf("Trying to reuse cached location %s compressed with %s in %s", candidate.Digest.String(), candidate.CompressorName, candidateRepo.Name()) + } else { + logrus.Debugf("Trying to reuse cached location %s with no compression in %s", candidate.Digest.String(), candidateRepo.Name()) + } // Sanity checks: if reference.Domain(candidateRepo) != reference.Domain(d.ref.ref) { @@ -351,8 +360,16 @@ func (d *dockerImageDestination) TryReusingBlob(ctx context.Context, info types. continue } } - cache.RecordKnownLocation(d.ref.Transport(), bicTransportScope(d.ref), candidate.Digest, newBICLocationReference(d.ref)) - return true, types.BlobInfo{Digest: candidate.Digest, Size: size}, nil + + bic.RecordKnownLocation(d.ref.Transport(), bicTransportScope(d.ref), candidate.Digest, newBICLocationReference(d.ref)) + + compressionOperation, compressionAlgorithm, err := blobinfocache.OperationAndAlgorithmForCompressor(candidate.CompressorName) + if err != nil { + logrus.Debugf("... 
Failed: %v", err) + continue + } + + return true, types.BlobInfo{Digest: candidate.Digest, MediaType: info.MediaType, Size: size, CompressionOperation: compressionOperation, CompressionAlgorithm: compressionAlgorithm}, nil } return false, types.BlobInfo{}, nil diff --git a/vendor/github.com/containers/image/v5/docker/docker_image_src.go b/vendor/github.com/containers/image/v5/docker/docker_image_src.go index 70ca7661e..2fe68ea27 100644 --- a/vendor/github.com/containers/image/v5/docker/docker_image_src.go +++ b/vendor/github.com/containers/image/v5/docker/docker_image_src.go @@ -64,6 +64,11 @@ func newImageSource(ctx context.Context, sys *types.SystemContext, ref dockerRef } attempts := []attempt{} for _, pullSource := range pullSources { + if sys.DockerLogMirrorChoice { + logrus.Infof("Trying to access %q", pullSource.Reference) + } else { + logrus.Debugf("Trying to access %q", pullSource.Reference) + } logrus.Debugf("Trying to access %q", pullSource.Reference) s, err := newImageSourceAttempt(ctx, sys, ref, pullSource) if err == nil { diff --git a/vendor/github.com/containers/image/v5/docker/internal/tarfile/dest.go b/vendor/github.com/containers/image/v5/docker/internal/tarfile/dest.go index 41d2c5e81..9559dfb56 100644 --- a/vendor/github.com/containers/image/v5/docker/internal/tarfile/dest.go +++ b/vendor/github.com/containers/image/v5/docker/internal/tarfile/dest.go @@ -159,7 +159,9 @@ func (d *Destination) PutBlob(ctx context.Context, stream io.Reader, inputInfo t // (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree). // info.Digest must not be empty. // If canSubstitute, TryReusingBlob can use an equivalent of the desired blob; in that case the returned info may not match the input. -// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size. +// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size, and may +// include CompressionOperation and CompressionAlgorithm fields to indicate that a change to the compression type should be +// reflected in the manifest that will be written. // If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure. // May use and/or update cache. func (d *Destination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) { diff --git a/vendor/github.com/containers/image/v5/docker/lookaside.go b/vendor/github.com/containers/image/v5/docker/lookaside.go index 06d616d01..84a0fa4ee 100644 --- a/vendor/github.com/containers/image/v5/docker/lookaside.go +++ b/vendor/github.com/containers/image/v5/docker/lookaside.go @@ -96,10 +96,16 @@ func SignatureStorageBaseURL(sys *types.SystemContext, ref types.ImageReference, // registriesDirPath returns a path to registries.d func registriesDirPath(sys *types.SystemContext) string { + return registriesDirPathWithHomeDir(sys, homedir.Get()) +} + +// registriesDirPathWithHomeDir is an internal implementation detail of registriesDirPath, +// it exists only to allow testing it with an artificial home directory.
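Extracting registriesDirPathWithHomeDir is a small dependency-injection pattern: the public function is unchanged, while tests can supply a synthetic home directory. A sketch of the kind of test this enables (hypothetical, and it assumes the function falls back to the system-wide registries.d path when the per-user directory does not exist):

import (
	"path/filepath"
	"testing"
)

func TestRegistriesDirPathWithHomeDir(t *testing.T) {
	home := t.TempDir() // empty, so the per-user registries.d does not exist
	if got := registriesDirPathWithHomeDir(nil, home); got == filepath.Join(home, userRegistriesDir) {
		t.Errorf("picked nonexistent per-user path %s", got)
	}
}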
+func registriesDirPathWithHomeDir(sys *types.SystemContext, homeDir string) string { if sys != nil && sys.RegistriesDirPath != "" { return sys.RegistriesDirPath } - userRegistriesDirPath := filepath.Join(homedir.Get(), userRegistriesDir) + userRegistriesDirPath := filepath.Join(homeDir, userRegistriesDir) if _, err := os.Stat(userRegistriesDirPath); err == nil { return userRegistriesDirPath } diff --git a/vendor/github.com/containers/image/v5/docker/policyconfiguration/naming.go b/vendor/github.com/containers/image/v5/docker/policyconfiguration/naming.go index 61d9aab9a..94e9e5f23 100644 --- a/vendor/github.com/containers/image/v5/docker/policyconfiguration/naming.go +++ b/vendor/github.com/containers/image/v5/docker/policyconfiguration/naming.go @@ -52,5 +52,26 @@ func DockerReferenceNamespaces(ref reference.Named) []string { } name = name[:lastSlash] } + + // Strip port number if any, before appending to res slice. + // Currently, the most compatible behavior is to return + // example.com:8443/ns, example.com:8443, *.com. + // If a port number is not specified, the expected behavior would be + // example.com/ns, example.com, *.com + portNumColon := strings.Index(name, ":") + if portNumColon != -1 { + name = name[:portNumColon] + } + + // Append wildcarded domains to res slice + for { + firstDot := strings.Index(name, ".") + if firstDot == -1 { + break + } + name = name[firstDot+1:] + + res = append(res, "*."+name) + } return res } diff --git a/vendor/github.com/containers/image/v5/docker/tarfile/dest.go b/vendor/github.com/containers/image/v5/docker/tarfile/dest.go index e16829d96..4f2465cac 100644 --- a/vendor/github.com/containers/image/v5/docker/tarfile/dest.go +++ b/vendor/github.com/containers/image/v5/docker/tarfile/dest.go @@ -86,7 +86,9 @@ func (d *Destination) PutBlob(ctx context.Context, stream io.Reader, inputInfo t // (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree). // info.Digest must not be empty. // If canSubstitute, TryReusingBlob can use an equivalent of the desired blob; in that case the returned info may not match the input. -// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size. +// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size, and may +// include CompressionOperation and CompressionAlgorithm fields to indicate that a change to the compression type should be +// reflected in the manifest that will be written. // If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure. // May use and/or update cache. func (d *Destination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) { diff --git a/vendor/github.com/containers/image/v5/image/docker_schema2.go b/vendor/github.com/containers/image/v5/image/docker_schema2.go index e4e01d5d9..61ca83364 100644 --- a/vendor/github.com/containers/image/v5/image/docker_schema2.go +++ b/vendor/github.com/containers/image/v5/image/docker_schema2.go @@ -154,6 +154,9 @@ func (m *manifestSchema2) UpdatedImageNeedsLayerDiffIDs(options types.ManifestUp // UpdatedImage returns a types.Image modified according to options. // This does not change the state of the original Image object.
+// The returned error will be a manifest.ManifestLayerCompressionIncompatibilityError +// if the CompressionOperation and CompressionAlgorithm specified in one or more +// options.LayerInfos items is anything other than gzip. func (m *manifestSchema2) UpdatedImage(ctx context.Context, options types.ManifestUpdateOptions) (types.Image, error) { copy := manifestSchema2{ // NOTE: This is not a deep copy, it still shares slices etc. src: m.src, diff --git a/vendor/github.com/containers/image/v5/image/oci.go b/vendor/github.com/containers/image/v5/image/oci.go index 5cb04f979..58e9c03ba 100644 --- a/vendor/github.com/containers/image/v5/image/oci.go +++ b/vendor/github.com/containers/image/v5/image/oci.go @@ -134,6 +134,10 @@ func (m *manifestOCI1) UpdatedImageNeedsLayerDiffIDs(options types.ManifestUpdat // UpdatedImage returns a types.Image modified according to options. // This does not change the state of the original Image object. +// The returned error will be a manifest.ManifestLayerCompressionIncompatibilityError +// if the combination of CompressionOperation and CompressionAlgorithm specified +// in one or more options.LayerInfos items indicates that a layer is compressed using +// an algorithm that is not allowed in OCI. func (m *manifestOCI1) UpdatedImage(ctx context.Context, options types.ManifestUpdateOptions) (types.Image, error) { copy := manifestOCI1{ // NOTE: This is not a deep copy, it still shares slices etc. src: m.src, diff --git a/vendor/github.com/containers/image/v5/internal/blobinfocache/blobinfocache.go b/vendor/github.com/containers/image/v5/internal/blobinfocache/blobinfocache.go new file mode 100644 index 000000000..1dceaa669 --- /dev/null +++ b/vendor/github.com/containers/image/v5/internal/blobinfocache/blobinfocache.go @@ -0,0 +1,63 @@ +package blobinfocache + +import ( + "github.com/containers/image/v5/pkg/compression" + "github.com/containers/image/v5/types" + digest "github.com/opencontainers/go-digest" +) + +// FromBlobInfoCache returns a BlobInfoCache2 based on a BlobInfoCache, returning the original +// object if it implements BlobInfoCache2, or a wrapper which discards compression information +// if it only implements BlobInfoCache. +func FromBlobInfoCache(bic types.BlobInfoCache) BlobInfoCache2 { + if bic2, ok := bic.(BlobInfoCache2); ok { + return bic2 + } + return &v1OnlyBlobInfoCache{ + BlobInfoCache: bic, + } +} + +type v1OnlyBlobInfoCache struct { + types.BlobInfoCache +} + +func (bic *v1OnlyBlobInfoCache) RecordDigestCompressorName(anyDigest digest.Digest, compressorName string) { +} + +func (bic *v1OnlyBlobInfoCache) CandidateLocations2(transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest, canSubstitute bool) []BICReplacementCandidate2 { + return nil +} + +// CandidateLocationsFromV2 converts a slice of BICReplacementCandidate2 to a slice of +// types.BICReplacementCandidate, dropping compression information. +func CandidateLocationsFromV2(v2candidates []BICReplacementCandidate2) []types.BICReplacementCandidate { + candidates := make([]types.BICReplacementCandidate, 0, len(v2candidates)) + for _, c := range v2candidates { + candidates = append(candidates, types.BICReplacementCandidate{ + Digest: c.Digest, + Location: c.Location, + }) + } + return candidates +} + +// OperationAndAlgorithmForCompressor returns CompressionOperation and CompressionAlgorithm +// values suitable for inclusion in a types.BlobInfo structure, based on the name of the +// compression algorithm, or Uncompressed, or UnknownCompression. 
This is typically used by +// TryReusingBlob() implementations to set values in the BlobInfo structure that they return +// upon success. +func OperationAndAlgorithmForCompressor(compressorName string) (types.LayerCompression, *compression.Algorithm, error) { + switch compressorName { + case Uncompressed: + return types.Decompress, nil, nil + case UnknownCompression: + return types.PreserveOriginal, nil, nil + default: + algo, err := compression.AlgorithmByName(compressorName) + if err == nil { + return types.Compress, &algo, nil + } + return types.PreserveOriginal, nil, err + } +} diff --git a/vendor/github.com/containers/image/v5/internal/blobinfocache/types.go b/vendor/github.com/containers/image/v5/internal/blobinfocache/types.go new file mode 100644 index 000000000..3c2be57f3 --- /dev/null +++ b/vendor/github.com/containers/image/v5/internal/blobinfocache/types.go @@ -0,0 +1,45 @@ +package blobinfocache + +import ( + "github.com/containers/image/v5/types" + digest "github.com/opencontainers/go-digest" +) + +const ( + // Uncompressed is the value we store in a blob info cache to indicate that we know that + // the blob in the corresponding location is not compressed. + Uncompressed = "uncompressed" + // UnknownCompression is the value we store in a blob info cache to indicate that we don't + // know if the blob in the corresponding location is compressed (and if so, how) or not. + UnknownCompression = "unknown" +) + +// BlobInfoCache2 extends BlobInfoCache by adding the ability to track information about what kind +// of compression was applied to the blobs it keeps information about. +type BlobInfoCache2 interface { + types.BlobInfoCache + // RecordDigestCompressorName records a compressor for the blob with the specified digest, + // or Uncompressed or UnknownCompression. + // WARNING: Only call this with LOCALLY VERIFIED data; don’t record a compressor for a + // digest just because some remote author claims so (e.g. because a manifest says so); + // otherwise the cache could be poisoned and cause us to make incorrect edits to type + // information in a manifest. + RecordDigestCompressorName(anyDigest digest.Digest, compressorName string) + // CandidateLocations2 returns a prioritized, limited, number of blobs and their locations + // that could possibly be reused within the specified (transport scope) (if they still + // exist, which is not guaranteed). + // + // If !canSubstitute, the returned cadidates will match the submitted digest exactly; if + // canSubstitute, data from previous RecordDigestUncompressedPair calls is used to also look + // up variants of the blob which have the same uncompressed digest. + // + // The CompressorName fields in returned data must never be UnknownCompression. + CandidateLocations2(transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest, canSubstitute bool) []BICReplacementCandidate2 +} + +// BICReplacementCandidate2 is an item returned by BlobInfoCache2.CandidateLocations2. 
+type BICReplacementCandidate2 struct { + Digest digest.Digest + CompressorName string // either the Name() of a known pkg/compression.Algorithm, or Uncompressed or UnknownCompression + Location types.BICLocationReference +} diff --git a/vendor/github.com/containers/image/v5/manifest/common.go b/vendor/github.com/containers/image/v5/manifest/common.go index fa2b39e0e..3ece948a0 100644 --- a/vendor/github.com/containers/image/v5/manifest/common.go +++ b/vendor/github.com/containers/image/v5/manifest/common.go @@ -5,7 +5,6 @@ import ( "github.com/containers/image/v5/pkg/compression" "github.com/containers/image/v5/types" - "github.com/pkg/errors" "github.com/sirupsen/logrus" ) @@ -54,6 +53,12 @@ const mtsUnsupportedMIMEType = "" // A value in compressionMIMETypeSet that mean // compressionVariantMIMEType returns a variant of mimeType for the specified algorithm (which may be nil // to mean "no compression"), based on variantTable. +// The returned error will be a ManifestLayerCompressionIncompatibilityError if mimeType has variants +// that differ only in what type of compression is applied, but it can't be combined with this +// algorithm to produce an updated MIME type that complies with the standard that defines mimeType. +// If the compression algorithm is unrecognized, or mimeType is not known to have variants that +// differ from it only in what type of compression has been applied, the returned error will not be +// a ManifestLayerCompressionIncompatibilityError. func compressionVariantMIMEType(variantTable []compressionMIMETypeSet, mimeType string, algorithm *compression.Algorithm) (string, error) { if mimeType == mtsUnsupportedMIMEType { // Prevent matching against the {algo:mtsUnsupportedMIMEType} entries return "", fmt.Errorf("cannot update unknown MIME type") @@ -70,15 +75,15 @@ func compressionVariantMIMEType(variantTable []compressionMIMETypeSet, mimeType return res, nil } if name != mtsUncompressed { - return "", fmt.Errorf("%s compression is not supported", name) + return "", ManifestLayerCompressionIncompatibilityError{fmt.Sprintf("%s compression is not supported for type %q", name, mt)} } - return "", errors.New("uncompressed variant is not supported") + return "", ManifestLayerCompressionIncompatibilityError{fmt.Sprintf("uncompressed variant is not supported for type %q", mt)} } if name != mtsUncompressed { - return "", fmt.Errorf("unknown compression algorithm %s", name) + return "", ManifestLayerCompressionIncompatibilityError{fmt.Sprintf("unknown compressed with algorithm %s variant for type %s", name, mt)} } // We can't very well say “the idea of no compression is unknown” - return "", errors.New("uncompressed variant is not supported") + return "", ManifestLayerCompressionIncompatibilityError{fmt.Sprintf("uncompressed variant is not supported for type %q", mt)} } } } @@ -89,7 +94,11 @@ func compressionVariantMIMEType(variantTable []compressionMIMETypeSet, mimeType } // updatedMIMEType returns the result of applying edits in updated (MediaType, CompressionOperation) to -// mimeType, based on variantTable. It may use updated.Digest for error messages. +// mimeType, based on variantTable. It may use updated.Digest for error messages. +// The returned error will be a ManifestLayerCompressionIncompatibilityError if mimeType has variants +// that differ only in what type of compression is applied, but applying updated.CompressionOperation +// and updated.CompressionAlgorithm to it won't produce an updated MIME type that complies with the +// standard that defines mimeType. 
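Taken together, the two new internal files give callers a compression-aware view over any cache, whichever interface the underlying implementation supports. A consumption sketch (illustration only; these are internal packages, so this shape is only available inside containers/image itself, and transport, scope, and the digest are assumed to come from the caller):

// candidateCompression resolves each candidate's compressor into the values a
// TryReusingBlob implementation would place into types.BlobInfo.
func candidateCompression(cache types.BlobInfoCache, transport types.ImageTransport, scope types.BICTransportScope, d digest.Digest) {
	bic := blobinfocache.FromBlobInfoCache(cache) // pass-through for a BlobInfoCache2, discarding wrapper otherwise
	for _, c := range bic.CandidateLocations2(transport, scope, d, true) {
		// Per the contract above, c.CompressorName is never UnknownCompression;
		// a wrapped v1-only cache simply yields no candidates at all.
		op, algo, err := blobinfocache.OperationAndAlgorithmForCompressor(c.CompressorName)
		if err != nil {
			continue // unrecognized compressor name; skip this candidate
		}
		_, _ = op, algo // would become BlobInfo.CompressionOperation / .CompressionAlgorithm
	}
}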
func updatedMIMEType(variantTable []compressionMIMETypeSet, mimeType string, updated types.BlobInfo) (string, error) { // Note that manifests in containers-storage might be reporting the // wrong media type since the original manifests are stored while layers @@ -99,6 +108,12 @@ func updatedMIMEType(variantTable []compressionMIMETypeSet, mimeType string, upd // {de}compressed. switch updated.CompressionOperation { case types.PreserveOriginal: + // Force a change to the media type if we're being told to use a particular compressor, + // since it might be different from the one associated with the media type. Otherwise, + // try to keep the original media type. + if updated.CompressionAlgorithm != nil { + return compressionVariantMIMEType(variantTable, mimeType, updated.CompressionAlgorithm) + } // Keep the original media type. return mimeType, nil @@ -116,3 +131,14 @@ func updatedMIMEType(variantTable []compressionMIMETypeSet, mimeType string, upd return "", fmt.Errorf("unknown compression operation (%d)", updated.CompressionOperation) } } + +// ManifestLayerCompressionIncompatibilityError indicates that a specified compression algorithm +// could not be applied to a layer MIME type. A caller that receives this should either retry +// the call with a different compression algorithm, or attempt to use a different manifest type. +type ManifestLayerCompressionIncompatibilityError struct { + text string +} + +func (m ManifestLayerCompressionIncompatibilityError) Error() string { + return m.text +} diff --git a/vendor/github.com/containers/image/v5/manifest/docker_schema2.go b/vendor/github.com/containers/image/v5/manifest/docker_schema2.go index 8d8bb9e01..6cb605263 100644 --- a/vendor/github.com/containers/image/v5/manifest/docker_schema2.go +++ b/vendor/github.com/containers/image/v5/manifest/docker_schema2.go @@ -226,6 +226,8 @@ var schema2CompressionMIMETypeSets = []compressionMIMETypeSet{ } // UpdateLayerInfos replaces the original layers with the specified BlobInfos (size+digest+urls), in order (the root layer first, and then successive layered layers) +// The returned error will be a manifest.ManifestLayerCompressionIncompatibilityError if any of the layerInfos includes a combination of CompressionOperation and +// CompressionAlgorithm that would result in anything other than gzip compression. func (m *Schema2) UpdateLayerInfos(layerInfos []types.BlobInfo) error { if len(m.LayersDescriptors) != len(layerInfos) { return errors.Errorf("Error preparing updated manifest: layer count changed from %d to %d", len(m.LayersDescriptors), len(layerInfos)) diff --git a/vendor/github.com/containers/image/v5/manifest/oci.go b/vendor/github.com/containers/image/v5/manifest/oci.go index 292614593..c6299d8e6 100644 --- a/vendor/github.com/containers/image/v5/manifest/oci.go +++ b/vendor/github.com/containers/image/v5/manifest/oci.go @@ -108,6 +108,8 @@ var oci1CompressionMIMETypeSets = []compressionMIMETypeSet{ } // UpdateLayerInfos replaces the original layers with the specified BlobInfos (size+digest+urls+mediatype), in order (the root layer first, and then successive layered layers) +// The returned error will be a manifest.ManifestLayerCompressionIncompatibilityError if any of the layerInfos includes a combination of CompressionOperation and +// CompressionAlgorithm that isn't supported by OCI. 
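This error type is what copyOneImage (earlier in this patch) checks for, alongside types.ManifestTypeRejectedError, before retrying with another manifest MIME type. A caller-side sketch of the intended handling (an assumed shape, mirroring the copy.go hunk above):

// updateOrFallBack applies layer edits and flags the compression-incompatible case.
func updateOrFallBack(m manifest.Manifest, updatedLayerInfos []types.BlobInfo) error {
	if err := m.UpdateLayerInfos(updatedLayerInfos); err != nil {
		if _, incompatible := errors.Cause(err).(manifest.ManifestLayerCompressionIncompatibilityError); incompatible {
			// The requested compression cannot be expressed in this manifest
			// format (e.g. zstd in docker schema2): retry with another MIME type.
		}
		return err
	}
	return nil
}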
func (m *OCI1) UpdateLayerInfos(layerInfos []types.BlobInfo) error { if len(m.Layers) != len(layerInfos) { return errors.Errorf("Error preparing updated manifest: layer count changed from %d to %d", len(m.Layers), len(layerInfos)) diff --git a/vendor/github.com/containers/image/v5/oci/archive/oci_dest.go b/vendor/github.com/containers/image/v5/oci/archive/oci_dest.go index 23d471325..c874eb775 100644 --- a/vendor/github.com/containers/image/v5/oci/archive/oci_dest.go +++ b/vendor/github.com/containers/image/v5/oci/archive/oci_dest.go @@ -103,7 +103,9 @@ func (d *ociArchiveImageDestination) PutBlob(ctx context.Context, stream io.Read // (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree). // info.Digest must not be empty. // If canSubstitute, TryReusingBlob can use an equivalent of the desired blob; in that case the returned info may not match the input. -// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size. +// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size, and may +// include CompressionOperation and CompressionAlgorithm fields to indicate that a change to the compression type should be +// reflected in the manifest that will be written. // If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure. // May use and/or update cache. func (d *ociArchiveImageDestination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) { diff --git a/vendor/github.com/containers/image/v5/oci/layout/oci_dest.go b/vendor/github.com/containers/image/v5/oci/layout/oci_dest.go index 0c88e1ef0..1230e8ca3 100644 --- a/vendor/github.com/containers/image/v5/oci/layout/oci_dest.go +++ b/vendor/github.com/containers/image/v5/oci/layout/oci_dest.go @@ -186,7 +186,9 @@ func (d *ociImageDestination) PutBlob(ctx context.Context, stream io.Reader, inp // (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree). // info.Digest must not be empty. // If canSubstitute, TryReusingBlob can use an equivalent of the desired blob; in that case the returned info may not match the input. -// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size. +// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size, and may +// include CompressionOperation and CompressionAlgorithm fields to indicate that a change to the compression type should be +// reflected in the manifest that will be written. // If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure. // May use and/or update cache.
func (d *ociImageDestination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) { @@ -204,6 +206,7 @@ func (d *ociImageDestination) TryReusingBlo if err != nil { return false, types.BlobInfo{}, err } + return true, types.BlobInfo{Digest: info.Digest, Size: finfo.Size()}, nil } diff --git a/vendor/github.com/containers/image/v5/openshift/openshift.go b/vendor/github.com/containers/image/v5/openshift/openshift.go index c4c84dd54..426046e66 100644 --- a/vendor/github.com/containers/image/v5/openshift/openshift.go +++ b/vendor/github.com/containers/image/v5/openshift/openshift.go @@ -410,7 +410,9 @@ func (d *openshiftImageDestination) PutBlob(ctx context.Context, stream io.Reade // (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree). // info.Digest must not be empty. // If canSubstitute, TryReusingBlob can use an equivalent of the desired blob; in that case the returned info may not match the input. -// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size. +// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size, and may +// include CompressionOperation and CompressionAlgorithm fields to indicate that a change to the compression type should be +// reflected in the manifest that will be written. // If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure. // May use and/or update cache.
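The same expanded TryReusingBlob contract is repeated for every destination in this patch. From the caller's side it reads roughly as follows (a sketch, not code from this change; dest, srcInfo, and cache are assumed):

// reuseIfPossible shows the caller's side of the contract (hypothetical helper).
func reuseIfPossible(ctx context.Context, dest types.ImageDestination, srcInfo types.BlobInfo, cache types.BlobInfoCache) (types.BlobInfo, bool, error) {
	reused, info, err := dest.TryReusingBlob(ctx, srcInfo, cache, true)
	if err != nil {
		return types.BlobInfo{}, false, err // unexpected failure, not merely "blob not present"
	}
	if reused && info.CompressionAlgorithm != nil {
		// The destination substituted a differently-compressed copy: the manifest
		// written later must reflect info's digest, size, and compression fields.
	}
	return info, reused, nil
}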
func (d *ostreeImageDestination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) { diff --git a/vendor/github.com/containers/image/v5/pkg/blobinfocache/boltdb/boltdb.go b/vendor/github.com/containers/image/v5/pkg/blobinfocache/boltdb/boltdb.go index 200dab593..2c211b8b8 100644 --- a/vendor/github.com/containers/image/v5/pkg/blobinfocache/boltdb/boltdb.go +++ b/vendor/github.com/containers/image/v5/pkg/blobinfocache/boltdb/boltdb.go @@ -7,6 +7,7 @@ import ( "sync" "time" + "github.com/containers/image/v5/internal/blobinfocache" "github.com/containers/image/v5/pkg/blobinfocache/internal/prioritize" "github.com/containers/image/v5/types" "github.com/opencontainers/go-digest" @@ -22,6 +23,9 @@ var ( // uncompressedDigestBucket stores a mapping from any digest to an uncompressed digest. uncompressedDigestBucket = []byte("uncompressedDigest") + // digestCompressorBucket stores a mapping from any digest to a compressor, or blobinfocache.Uncompressed + // It may not exist in caches created by older versions, even if uncompressedDigestBucket is present. + digestCompressorBucket = []byte("digestCompressor") // digestByUncompressedBucket stores a bucket per uncompressed digest, with the bucket containing a set of digests for that uncompressed digest // (as a set of key=digest, value="" pairs) digestByUncompressedBucket = []byte("digestByUncompressed") @@ -95,6 +99,9 @@ type cache struct { // // Most users should call blobinfocache.DefaultCache instead. func New(path string) types.BlobInfoCache { + return new2(path) +} +func new2(path string) *cache { return &cache{path: path} } @@ -220,6 +227,30 @@ func (bdc *cache) RecordDigestUncompressedPair(anyDigest digest.Digest, uncompre }) // FIXME? Log error (but throttle the log volume on repeated accesses)? } +// RecordDigestCompressorName records that the blob with digest anyDigest was compressed with the specified +// compressor, or is blobinfocache.Uncompressed. +// WARNING: Only call this for LOCALLY VERIFIED data; don’t record a digest pair just because some remote author claims so (e.g. +// because a manifest/config pair exists); otherwise the cache could be poisoned and allow substituting unexpected blobs. +// (Eventually, the DiffIDs in image config could detect the substitution, but that may be too late, and not all image formats contain that data.) +func (bdc *cache) RecordDigestCompressorName(anyDigest digest.Digest, compressorName string) { + _ = bdc.update(func(tx *bolt.Tx) error { + b, err := tx.CreateBucketIfNotExists(digestCompressorBucket) + if err != nil { + return err + } + key := []byte(anyDigest.String()) + if previousBytes := b.Get(key); previousBytes != nil { + if string(previousBytes) != compressorName { + logrus.Warnf("Compressor for blob with digest %s previously recorded as %s, now %s", anyDigest, string(previousBytes), compressorName) + } + } + if compressorName == blobinfocache.UnknownCompression { + return b.Delete(key) + } + return b.Put(key, []byte(compressorName)) + }) // FIXME? Log error (but throttle the log volume on repeated accesses)? +} + // RecordKnownLocation records that a blob with the specified digest exists within the specified (transport, scope) scope, // and can be reused given the opaque location data. 
func (bdc *cache) RecordKnownLocation(transport types.ImageTransport, scope types.BICTransportScope, blobDigest digest.Digest, location types.BICLocationReference) { @@ -251,21 +282,34 @@ func (bdc *cache) RecordKnownLocation(transport types.ImageTransport, scope type
 }) // FIXME? Log error (but throttle the log volume on repeated accesses)?
 }
-// appendReplacementCandiates creates prioritize.CandidateWithTime values for digest in scopeBucket, and returns the result of appending them to candidates.
-func (bdc *cache) appendReplacementCandidates(candidates []prioritize.CandidateWithTime, scopeBucket *bolt.Bucket, digest digest.Digest) []prioritize.CandidateWithTime {
- b := scopeBucket.Bucket([]byte(digest.String()))
+// appendReplacementCandidates creates prioritize.CandidateWithTime values for digest in scopeBucket with corresponding compression info from compressionBucket (if compressionBucket is not nil), and returns the result of appending them to candidates.
+func (bdc *cache) appendReplacementCandidates(candidates []prioritize.CandidateWithTime, scopeBucket, compressionBucket *bolt.Bucket, digest digest.Digest, requireCompressionInfo bool) []prioritize.CandidateWithTime {
+ digestKey := []byte(digest.String())
+ b := scopeBucket.Bucket(digestKey)
 if b == nil {
 return candidates
 }
+ compressorName := blobinfocache.UnknownCompression
+ if compressionBucket != nil {
+ // the bucket won't exist if the cache was created by a v1 implementation and
+ // hasn't yet been updated by a v2 implementation
+ if compressorNameValue := compressionBucket.Get(digestKey); len(compressorNameValue) > 0 {
+ compressorName = string(compressorNameValue)
+ }
+ }
+ if compressorName == blobinfocache.UnknownCompression && requireCompressionInfo {
+ return candidates
+ }
 _ = b.ForEach(func(k, v []byte) error {
 t := time.Time{}
 if err := t.UnmarshalBinary(v); err != nil {
 return err
 }
 candidates = append(candidates, prioritize.CandidateWithTime{
- Candidate: types.BICReplacementCandidate{
- Digest: digest,
- Location: types.BICLocationReference{Opaque: string(k)},
+ Candidate: blobinfocache.BICReplacementCandidate2{
+ Digest: digest,
+ CompressorName: compressorName,
+ Location: types.BICLocationReference{Opaque: string(k)},
 },
 LastSeen: t,
 }) @@ -274,13 +318,17 @@ func (bdc *cache) appendReplacementCandidates(candidates []prioritize.CandidateW
 return candidates
 }
-// CandidateLocations returns a prioritized, limited, number of blobs and their locations that could possibly be reused
+// CandidateLocations2 returns a prioritized, limited, number of blobs and their locations that could possibly be reused
 // within the specified (transport scope) (if they still exist, which is not guaranteed).
 //
 // If !canSubstitute, the returned candidates will match the submitted digest exactly; if canSubstitute,
 // data from previous RecordDigestUncompressedPair calls is used to also look up variants of the blob which have the same
 // uncompressed digest.
-func (bdc *cache) CandidateLocations(transport types.ImageTransport, scope types.BICTransportScope, primaryDigest digest.Digest, canSubstitute bool) []types.BICReplacementCandidate {
+func (bdc *cache) CandidateLocations2(transport types.ImageTransport, scope types.BICTransportScope, primaryDigest digest.Digest, canSubstitute bool) []blobinfocache.BICReplacementCandidate2 {
+ return bdc.candidateLocations(transport, scope, primaryDigest, canSubstitute, true)
+}
+
+func (bdc *cache) candidateLocations(transport types.ImageTransport, scope types.BICTransportScope, primaryDigest digest.Digest, canSubstitute, requireCompressionInfo bool) []blobinfocache.BICReplacementCandidate2 {
 res := []prioritize.CandidateWithTime{}
 var uncompressedDigestValue digest.Digest // = ""
 if err := bdc.view(func(tx *bolt.Tx) error { @@ -296,8 +344,11 @@ func (bdc *cache) CandidateLocations(transport types.ImageTransport, scope types
 if scopeBucket == nil {
 return nil
 }
+ // compressionBucket won't have been created if previous writers never recorded info about compression,
+ // and we don't want to fail just because of that
+ compressionBucket := tx.Bucket(digestCompressorBucket)
- res = bdc.appendReplacementCandidates(res, scopeBucket, primaryDigest)
+ res = bdc.appendReplacementCandidates(res, scopeBucket, compressionBucket, primaryDigest, requireCompressionInfo)
 if canSubstitute {
 if uncompressedDigestValue = bdc.uncompressedDigest(tx, primaryDigest); uncompressedDigestValue != "" {
 b := tx.Bucket(digestByUncompressedBucket) @@ -310,7 +361,7 @@ func (bdc *cache) CandidateLocations(transport types.ImageTransport, scope types
 return err
 }
 if d != primaryDigest && d != uncompressedDigestValue {
- res = bdc.appendReplacementCandidates(res, scopeBucket, d)
+ res = bdc.appendReplacementCandidates(res, scopeBucket, compressionBucket, d, requireCompressionInfo)
 }
 return nil
 }); err != nil { @@ -319,14 +370,24 @@ func (bdc *cache) CandidateLocations(transport types.ImageTransport, scope types
 }
 }
 if uncompressedDigestValue != primaryDigest {
- res = bdc.appendReplacementCandidates(res, scopeBucket, uncompressedDigestValue)
+ res = bdc.appendReplacementCandidates(res, scopeBucket, compressionBucket, uncompressedDigestValue, requireCompressionInfo)
 }
 }
 }
 return nil
 }); err != nil { // Including os.IsNotExist(err)
- return []types.BICReplacementCandidate{} // FIXME? Log err (but throttle the log volume on repeated accesses)?
+ return []blobinfocache.BICReplacementCandidate2{} // FIXME? Log err (but throttle the log volume on repeated accesses)?
 }
 return prioritize.DestructivelyPrioritizeReplacementCandidates(res, primaryDigest, uncompressedDigestValue)
 }
+
+// CandidateLocations returns a prioritized, limited, number of blobs and their locations that could possibly be reused
+// within the specified (transport scope) (if they still exist, which is not guaranteed).
+//
+// If !canSubstitute, the returned candidates will match the submitted digest exactly; if canSubstitute,
+// data from previous RecordDigestUncompressedPair calls is used to also look up variants of the blob which have the same
+// uncompressed digest.
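Note how candidateLocations treats digestCompressorBucket as optional, so caches written by v1 code keep working. That read path reduces to this freestanding bbolt sketch (module go.etcd.io/bbolt, though the vendored import path in this tree may differ; the database path, bucket names, and digest key are made up):

```go
package main

import (
	"fmt"
	"log"

	bolt "go.etcd.io/bbolt"
)

func main() {
	db, err := bolt.Open("/tmp/example.boltdb", 0600, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	err = db.View(func(tx *bolt.Tx) error {
		locations := tx.Bucket([]byte("knownLocations"))
		if locations == nil {
			return nil // nothing recorded yet; not an error
		}
		// Optional bucket: absent if only v1 writers ever touched this cache.
		compressorName := "UnknownCompression"
		if compressors := tx.Bucket([]byte("digestCompressor")); compressors != nil {
			if v := compressors.Get([]byte("sha256:0123")); v != nil {
				compressorName = string(v)
			}
		}
		fmt.Println("compressor:", compressorName)
		return nil
	})
	if err != nil {
		log.Fatal(err)
	}
}
```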
+func (bdc *cache) CandidateLocations(transport types.ImageTransport, scope types.BICTransportScope, primaryDigest digest.Digest, canSubstitute bool) []types.BICReplacementCandidate { + return blobinfocache.CandidateLocationsFromV2(bdc.candidateLocations(transport, scope, primaryDigest, canSubstitute, false)) +} diff --git a/vendor/github.com/containers/image/v5/pkg/blobinfocache/internal/prioritize/prioritize.go b/vendor/github.com/containers/image/v5/pkg/blobinfocache/internal/prioritize/prioritize.go index 5deca4a82..6f5506d94 100644 --- a/vendor/github.com/containers/image/v5/pkg/blobinfocache/internal/prioritize/prioritize.go +++ b/vendor/github.com/containers/image/v5/pkg/blobinfocache/internal/prioritize/prioritize.go @@ -6,7 +6,7 @@ import ( "sort" "time" - "github.com/containers/image/v5/types" + "github.com/containers/image/v5/internal/blobinfocache" "github.com/opencontainers/go-digest" ) @@ -17,8 +17,8 @@ const replacementAttempts = 5 // CandidateWithTime is the input to types.BICReplacementCandidate prioritization. type CandidateWithTime struct { - Candidate types.BICReplacementCandidate // The replacement candidate - LastSeen time.Time // Time the candidate was last known to exist (either read or written) + Candidate blobinfocache.BICReplacementCandidate2 // The replacement candidate + LastSeen time.Time // Time the candidate was last known to exist (either read or written) } // candidateSortState is a local state implementing sort.Interface on candidates to prioritize, @@ -79,7 +79,7 @@ func (css *candidateSortState) Swap(i, j int) { // destructivelyPrioritizeReplacementCandidatesWithMax is destructivelyPrioritizeReplacementCandidates with a parameter for the // number of entries to limit, only to make testing simpler. -func destructivelyPrioritizeReplacementCandidatesWithMax(cs []CandidateWithTime, primaryDigest, uncompressedDigest digest.Digest, maxCandidates int) []types.BICReplacementCandidate { +func destructivelyPrioritizeReplacementCandidatesWithMax(cs []CandidateWithTime, primaryDigest, uncompressedDigest digest.Digest, maxCandidates int) []blobinfocache.BICReplacementCandidate2 { // We don't need to use sort.Stable() because nanosecond timestamps are (presumably?) unique, so no two elements should // compare equal. sort.Sort(&candidateSortState{ @@ -92,7 +92,7 @@ func destructivelyPrioritizeReplacementCandidatesWithMax(cs []CandidateWithTime, if resLength > maxCandidates { resLength = maxCandidates } - res := make([]types.BICReplacementCandidate, resLength) + res := make([]blobinfocache.BICReplacementCandidate2, resLength) for i := range res { res[i] = cs[i].Candidate } @@ -105,6 +105,6 @@ func destructivelyPrioritizeReplacementCandidatesWithMax(cs []CandidateWithTime, // // WARNING: The array of candidates is destructively modified. (The implementation of this function could of course // make a copy, but all CandidateLocations implementations build the slice of candidates only for the single purpose of calling this function anyway.) 
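For intuition, the destructive prioritization used here boils down to sort-then-truncate. A simplified model using sort.Slice, ignoring the uncompressed-digest tier that candidateSortState also handles:

```go
package main

import (
	"fmt"
	"sort"
	"time"
)

type candidate struct {
	digest   string
	lastSeen time.Time
}

// prioritize keeps the primaryDigest candidates first, newest first, and
// returns at most max entries; the input slice is reordered in place.
func prioritize(cs []candidate, primaryDigest string, max int) []candidate {
	sort.Slice(cs, func(i, j int) bool {
		if (cs[i].digest == primaryDigest) != (cs[j].digest == primaryDigest) {
			return cs[i].digest == primaryDigest
		}
		return cs[i].lastSeen.After(cs[j].lastSeen)
	})
	if len(cs) > max {
		cs = cs[:max]
	}
	return cs
}

func main() {
	now := time.Now()
	cs := []candidate{
		{"sha256:aaa", now.Add(-time.Hour)},
		{"sha256:bbb", now.Add(-time.Minute)},
		{"sha256:aaa", now},
	}
	for _, c := range prioritize(cs, "sha256:bbb", 2) {
		fmt.Println(c.digest, c.lastSeen)
	}
}
```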
-func DestructivelyPrioritizeReplacementCandidates(cs []CandidateWithTime, primaryDigest, uncompressedDigest digest.Digest) []types.BICReplacementCandidate {
+func DestructivelyPrioritizeReplacementCandidates(cs []CandidateWithTime, primaryDigest, uncompressedDigest digest.Digest) []blobinfocache.BICReplacementCandidate2 {
 return destructivelyPrioritizeReplacementCandidatesWithMax(cs, primaryDigest, uncompressedDigest, replacementAttempts)
 }
diff --git a/vendor/github.com/containers/image/v5/pkg/blobinfocache/memory/memory.go b/vendor/github.com/containers/image/v5/pkg/blobinfocache/memory/memory.go index 8f28c6623..3d598057e 100644 --- a/vendor/github.com/containers/image/v5/pkg/blobinfocache/memory/memory.go +++ b/vendor/github.com/containers/image/v5/pkg/blobinfocache/memory/memory.go @@ -5,6 +5,7 @@ import (
 "sync"
 "time"
+ "github.com/containers/image/v5/internal/blobinfocache"
 "github.com/containers/image/v5/pkg/blobinfocache/internal/prioritize"
 "github.com/containers/image/v5/types"
 digest "github.com/opencontainers/go-digest" @@ -25,6 +26,7 @@ type cache struct {
 uncompressedDigests map[digest.Digest]digest.Digest
 digestsByUncompressed map[digest.Digest]map[digest.Digest]struct{} // stores a set of digests for each uncompressed digest
 knownLocations map[locationKey]map[types.BICLocationReference]time.Time // stores last known existence time for each location reference
+ compressors map[digest.Digest]string // stores a compressor name, or blobinfocache.Uncompressed, for each digest
 }
 // New returns a BlobInfoCache implementation which is in-memory only. @@ -36,10 +38,15 @@ type cache struct {
 // Manual users of types.{ImageSource,ImageDestination} might also use
 // this instead of a persistent cache.
 func New() types.BlobInfoCache {
+ return new2()
+}
+
+func new2() *cache {
 return &cache{
 uncompressedDigests: map[digest.Digest]digest.Digest{},
 digestsByUncompressed: map[digest.Digest]map[digest.Digest]struct{}{},
 knownLocations: map[locationKey]map[types.BICLocationReference]time.Time{},
+ compressors: map[digest.Digest]string{},
 }
 } @@ -101,14 +108,34 @@ func (mem *cache) RecordKnownLocation(transport types.ImageTransport, scope type
 locationScope[location] = time.Now() // Possibly overwriting an older entry.
 }
+// RecordDigestCompressorName records that the blob with the specified digest is either compressed with the specified
+// algorithm, or uncompressed, or that we no longer know.
+func (mem *cache) RecordDigestCompressorName(blobDigest digest.Digest, compressorName string) {
+ mem.mutex.Lock()
+ defer mem.mutex.Unlock()
+ if compressorName == blobinfocache.UnknownCompression {
+ delete(mem.compressors, blobDigest)
+ return
+ }
+ mem.compressors[blobDigest] = compressorName
+}
+
 // appendReplacementCandidates creates prioritize.CandidateWithTime values for (transport, scope, digest), and returns the result of appending them to candidates.
-func (mem *cache) appendReplacementCandidates(candidates []prioritize.CandidateWithTime, transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest) []prioritize.CandidateWithTime {
+func (mem *cache) appendReplacementCandidates(candidates []prioritize.CandidateWithTime, transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest, requireCompressionInfo bool) []prioritize.CandidateWithTime {
 locations := mem.knownLocations[locationKey{transport: transport.Name(), scope: scope, blobDigest: digest}] // nil if not present
 for l, t := range locations {
+ compressorName, compressorKnown := mem.compressors[digest]
+ if !compressorKnown {
+ if requireCompressionInfo {
+ continue
+ }
+ compressorName = blobinfocache.UnknownCompression
+ }
 candidates = append(candidates, prioritize.CandidateWithTime{
- Candidate: types.BICReplacementCandidate{
- Digest: digest,
- Location: l,
+ Candidate: blobinfocache.BICReplacementCandidate2{
+ Digest: digest,
+ CompressorName: compressorName,
+ Location: l,
 },
 LastSeen: t,
 }) @@ -123,21 +150,35 @@ func (mem *cache) appendReplacementCandidates(candidates []prioritize.CandidateW
 // data from previous RecordDigestUncompressedPair calls is used to also look up variants of the blob which have the same
 // uncompressed digest.
 func (mem *cache) CandidateLocations(transport types.ImageTransport, scope types.BICTransportScope, primaryDigest digest.Digest, canSubstitute bool) []types.BICReplacementCandidate {
+ return blobinfocache.CandidateLocationsFromV2(mem.candidateLocations(transport, scope, primaryDigest, canSubstitute, false))
+}
+
+// CandidateLocations2 returns a prioritized, limited, number of blobs and their locations that could possibly be reused
+// within the specified (transport scope) (if they still exist, which is not guaranteed).
+//
+// If !canSubstitute, the returned candidates will match the submitted digest exactly; if canSubstitute,
+// data from previous RecordDigestUncompressedPair calls is used to also look up variants of the blob which have the same
+// uncompressed digest.
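Since the public v1 surface of the memory backend is unchanged, existing callers compile as before. A small usage sketch against that public API (the transport, scope, and digest values are placeholders):

```go
package main

import (
	"fmt"

	"github.com/containers/image/v5/docker"
	"github.com/containers/image/v5/pkg/blobinfocache/memory"
	"github.com/containers/image/v5/types"
	"github.com/opencontainers/go-digest"
)

func main() {
	cache := memory.New()
	scope := types.BICTransportScope{Opaque: "docker.io/library/busybox"}
	d := digest.FromString("layer contents")

	// Record where a blob with this digest was seen...
	cache.RecordKnownLocation(docker.Transport, scope, d, types.BICLocationReference{Opaque: "example-location"})

	// ...and ask for reuse candidates later.
	for _, c := range cache.CandidateLocations(docker.Transport, scope, d, false) {
		fmt.Println(c.Digest, c.Location.Opaque)
	}
}
```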
+func (mem *cache) CandidateLocations2(transport types.ImageTransport, scope types.BICTransportScope, primaryDigest digest.Digest, canSubstitute bool) []blobinfocache.BICReplacementCandidate2 { + return mem.candidateLocations(transport, scope, primaryDigest, canSubstitute, true) +} + +func (mem *cache) candidateLocations(transport types.ImageTransport, scope types.BICTransportScope, primaryDigest digest.Digest, canSubstitute, requireCompressionInfo bool) []blobinfocache.BICReplacementCandidate2 { mem.mutex.Lock() defer mem.mutex.Unlock() res := []prioritize.CandidateWithTime{} - res = mem.appendReplacementCandidates(res, transport, scope, primaryDigest) + res = mem.appendReplacementCandidates(res, transport, scope, primaryDigest, requireCompressionInfo) var uncompressedDigest digest.Digest // = "" if canSubstitute { if uncompressedDigest = mem.uncompressedDigestLocked(primaryDigest); uncompressedDigest != "" { otherDigests := mem.digestsByUncompressed[uncompressedDigest] // nil if not present in the map for d := range otherDigests { if d != primaryDigest && d != uncompressedDigest { - res = mem.appendReplacementCandidates(res, transport, scope, d) + res = mem.appendReplacementCandidates(res, transport, scope, d, requireCompressionInfo) } } if uncompressedDigest != primaryDigest { - res = mem.appendReplacementCandidates(res, transport, scope, uncompressedDigest) + res = mem.appendReplacementCandidates(res, transport, scope, uncompressedDigest, requireCompressionInfo) } } } diff --git a/vendor/github.com/containers/image/v5/pkg/blobinfocache/none/none.go b/vendor/github.com/containers/image/v5/pkg/blobinfocache/none/none.go index fa1879afd..2a54ff312 100644 --- a/vendor/github.com/containers/image/v5/pkg/blobinfocache/none/none.go +++ b/vendor/github.com/containers/image/v5/pkg/blobinfocache/none/none.go @@ -2,6 +2,7 @@ package none import ( + "github.com/containers/image/v5/internal/blobinfocache" "github.com/containers/image/v5/types" "github.com/opencontainers/go-digest" ) @@ -16,7 +17,7 @@ type noCache struct { // Manifest.Inspect, because configs only have one representation. // Any use of BlobInfoCache with blobs should usually use at least a // short-lived cache, ideally blobinfocache.DefaultCache. -var NoCache types.BlobInfoCache = noCache{} +var NoCache blobinfocache.BlobInfoCache2 = blobinfocache.FromBlobInfoCache(&noCache{}) // UncompressedDigest returns an uncompressed digest corresponding to anyDigest. // May return anyDigest if it is known to be uncompressed. diff --git a/vendor/github.com/containers/image/v5/pkg/docker/config/config.go b/vendor/github.com/containers/image/v5/pkg/docker/config/config.go index cf82ee861..983df41d8 100644 --- a/vendor/github.com/containers/image/v5/pkg/docker/config/config.go +++ b/vendor/github.com/containers/image/v5/pkg/docker/config/config.go @@ -86,7 +86,7 @@ func GetAllCredentials(sys *types.SystemContext) (map[string]types.DockerAuthCon // Note: we need to read the auth files in the inverse order to prevent // a priority inversion when writing to the map. authConfigs := make(map[string]types.DockerAuthConfig) - paths := getAuthFilePaths(sys) + paths := getAuthFilePaths(sys, homedir.Get()) for i := len(paths) - 1; i >= 0; i-- { path := paths[i] // readJSONFile returns an empty map in case the path doesn't exist. @@ -126,7 +126,9 @@ func GetAllCredentials(sys *types.SystemContext) (map[string]types.DockerAuthCon // getAuthFilePaths returns a slice of authPaths based on the system context // in the order they should be searched. 
Note that some paths may not exist. -func getAuthFilePaths(sys *types.SystemContext) []authPath { +// The homeDir parameter should always be homedir.Get(), and is only intended to be overridden +// by tests. +func getAuthFilePaths(sys *types.SystemContext, homeDir string) []authPath { paths := []authPath{} pathToAuth, lf, err := getPathToAuth(sys) if err == nil { @@ -139,7 +141,7 @@ func getAuthFilePaths(sys *types.SystemContext) []authPath { } xdgCfgHome := os.Getenv("XDG_CONFIG_HOME") if xdgCfgHome == "" { - xdgCfgHome = filepath.Join(homedir.Get(), ".config") + xdgCfgHome = filepath.Join(homeDir, ".config") } paths = append(paths, authPath{path: filepath.Join(xdgCfgHome, xdgConfigHomePath), legacyFormat: false}) if dockerConfig := os.Getenv("DOCKER_CONFIG"); dockerConfig != "" { @@ -148,11 +150,11 @@ func getAuthFilePaths(sys *types.SystemContext) []authPath { ) } else { paths = append(paths, - authPath{path: filepath.Join(homedir.Get(), dockerHomePath), legacyFormat: false}, + authPath{path: filepath.Join(homeDir, dockerHomePath), legacyFormat: false}, ) } paths = append(paths, - authPath{path: filepath.Join(homedir.Get(), dockerLegacyHomePath), legacyFormat: true}, + authPath{path: filepath.Join(homeDir, dockerLegacyHomePath), legacyFormat: true}, ) return paths } @@ -161,6 +163,12 @@ func getAuthFilePaths(sys *types.SystemContext) []authPath { // file or .docker/config.json, including support for OAuth2 and IdentityToken. // If an entry is not found, an empty struct is returned. func GetCredentials(sys *types.SystemContext, registry string) (types.DockerAuthConfig, error) { + return getCredentialsWithHomeDir(sys, registry, homedir.Get()) +} + +// getCredentialsWithHomeDir is an internal implementation detail of GetCredentials, +// it exists only to allow testing it with an artificial home directory. +func getCredentialsWithHomeDir(sys *types.SystemContext, registry, homeDir string) (types.DockerAuthConfig, error) { if sys != nil && sys.DockerAuthConfig != nil { logrus.Debug("Returning credentials from DockerAuthConfig") return *sys.DockerAuthConfig, nil @@ -177,7 +185,7 @@ func GetCredentials(sys *types.SystemContext, registry string) (types.DockerAuth } } - for _, path := range getAuthFilePaths(sys) { + for _, path := range getAuthFilePaths(sys, homeDir) { authConfig, err := findAuthentication(registry, path.path, path.legacyFormat) if err != nil { logrus.Debugf("Credentials not found") @@ -203,7 +211,13 @@ func GetCredentials(sys *types.SystemContext, registry string) (types.DockerAuth // GetCredentials API. The new API should be used and this API is kept to // maintain backward compatibility. func GetAuthentication(sys *types.SystemContext, registry string) (string, string, error) { - auth, err := GetCredentials(sys, registry) + return getAuthenticationWithHomeDir(sys, registry, homedir.Get()) +} + +// getAuthenticationWithHomeDir is an internal implementation detail of GetAuthentication, +// it exists only to allow testing it with an artificial home directory. 
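This homeDir parameterization recurs across the config, sysregistriesv2, and signature packages in this patch. Reduced to its essentials (function names invented for the sketch), the pattern is:

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// lookupConfig is the public entry point; it resolves the real home
// directory exactly once, at the outermost layer.
func lookupConfig() (string, error) {
	home, err := os.UserHomeDir()
	if err != nil {
		return "", err
	}
	return lookupConfigWithHomeDir(home), nil
}

// lookupConfigWithHomeDir is the testable core: tests pass a temp dir
// instead of mutating the process's real $HOME.
func lookupConfigWithHomeDir(homeDir string) string {
	return filepath.Join(homeDir, ".docker", "config.json")
}

func main() {
	p, err := lookupConfig()
	if err != nil {
		fmt.Println("no home directory:", err)
		return
	}
	fmt.Println(p)
}
```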
+func getAuthenticationWithHomeDir(sys *types.SystemContext, registry, homeDir string) (string, string, error) {
+ auth, err := getCredentialsWithHomeDir(sys, registry, homeDir)
 if err != nil {
 return "", "", err
 } @@ -262,6 +276,12 @@ func RemoveAllAuthentication(sys *types.SystemContext) error {
 // getPathToAuth gets the path of the auth.json file used for reading and writing credentials
 // returns the path, and a bool specifies whether the file is in legacy format
 func getPathToAuth(sys *types.SystemContext) (string, bool, error) {
+ return getPathToAuthWithOS(sys, runtime.GOOS)
+}
+
+// getPathToAuthWithOS is an internal implementation detail of getPathToAuth,
+// it exists only to allow testing it with an artificial runtime.GOOS.
+func getPathToAuthWithOS(sys *types.SystemContext, goOS string) (string, bool, error) {
 if sys != nil {
 if sys.AuthFilePath != "" {
 return sys.AuthFilePath, false, nil @@ -273,7 +293,7 @@ func getPathToAuth(sys *types.SystemContext) (string, bool, error) {
 return filepath.Join(sys.RootForImplicitAbsolutePaths, fmt.Sprintf(defaultPerUIDPathFormat, os.Getuid())), false, nil
 }
 }
- if runtime.GOOS == "windows" || runtime.GOOS == "darwin" {
+ if goOS == "windows" || goOS == "darwin" {
 return filepath.Join(homedir.Get(), nonLinuxAuthFilePath), false, nil
 }
diff --git a/vendor/github.com/containers/image/v5/pkg/shortnames/shortnames.go b/vendor/github.com/containers/image/v5/pkg/shortnames/shortnames.go index 198ac1cc6..f1e5c453e 100644 --- a/vendor/github.com/containers/image/v5/pkg/shortnames/shortnames.go +++ b/vendor/github.com/containers/image/v5/pkg/shortnames/shortnames.go @@ -149,9 +149,9 @@ const (
 func (r *Resolved) Description() string {
 switch r.rationale {
 case rationaleAlias:
- return fmt.Sprintf("Resolved short name %q to a recorded short-name alias (origin: %s)", r.userInput, r.originDescription)
+ return fmt.Sprintf("Resolved %q as an alias (%s)", r.userInput, r.originDescription)
 case rationaleUSR:
- return fmt.Sprintf("Completed short name %q with unqualified-search registries (origin: %s)", r.userInput, r.originDescription)
+ return fmt.Sprintf("Resolving %q using unqualified-search registries (%s)", r.userInput, r.originDescription)
 case rationaleUserSelection, rationaleNone:
 fallthrough
 default: @@ -240,14 +240,14 @@ func Resolve(ctx *types.SystemContext, name string) (*Resolved, error) {
 // Create a copy of the system context to make it usable beyond this
 // function call.
- var sys *types.SystemContext
 if ctx != nil {
- sys = &(*ctx)
+ copy := *ctx
+ ctx = &copy
 }
 resolved.systemContext = ctx
 // Detect which mode we're running in.
- mode, err := sysregistriesv2.GetShortNameMode(sys)
+ mode, err := sysregistriesv2.GetShortNameMode(ctx)
 if err != nil {
 return nil, err
 } @@ -276,7 +276,7 @@ func Resolve(ctx *types.SystemContext, name string) (*Resolved, error) {
 resolved.userInput = shortNameRepo
 // If there's already an alias, use it.
- namedAlias, aliasOriginDescription, err := sysregistriesv2.ResolveShortNameAlias(sys, shortNameRepo.String())
+ namedAlias, aliasOriginDescription, err := sysregistriesv2.ResolveShortNameAlias(ctx, shortNameRepo.String())
 if err != nil {
 return nil, err
 } @@ -307,7 +307,7 @@ func Resolve(ctx *types.SystemContext, name string) (*Resolved, error) {
 resolved.rationale = rationaleUSR
 // Query the registry for unqualified-search registries.
- unqualifiedSearchRegistries, usrConfig, err := sysregistriesv2.UnqualifiedSearchRegistriesWithOrigin(sys)
+ unqualifiedSearchRegistries, usrConfig, err := sysregistriesv2.UnqualifiedSearchRegistriesWithOrigin(ctx)
 if err != nil {
 return nil, err
 }
diff --git a/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/system_registries_v2.go b/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/system_registries_v2.go index 89ad7c533..b0dff5447 100644 --- a/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/system_registries_v2.go +++ b/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/system_registries_v2.go @@ -405,9 +405,15 @@ type configWrapper struct {
 // newConfigWrapper returns a configWrapper for the specified SystemContext.
 func newConfigWrapper(ctx *types.SystemContext) configWrapper {
+ return newConfigWrapperWithHomeDir(ctx, homedir.Get())
+}
+
+// newConfigWrapperWithHomeDir is an internal implementation detail of newConfigWrapper,
+// it exists only to allow testing it with an artificial home directory.
+func newConfigWrapperWithHomeDir(ctx *types.SystemContext, homeDir string) configWrapper {
 var wrapper configWrapper
- userRegistriesFilePath := filepath.Join(homedir.Get(), userRegistriesFile)
- userRegistriesDirPath := filepath.Join(homedir.Get(), userRegistriesDir)
+ userRegistriesFilePath := filepath.Join(homeDir, userRegistriesFile)
+ userRegistriesDirPath := filepath.Join(homeDir, userRegistriesDir)
 // decide configPath using per-user path or system file
 if ctx != nil && ctx.SystemRegistriesConfPath != "" {
diff --git a/vendor/github.com/containers/image/v5/signature/policy_config.go b/vendor/github.com/containers/image/v5/signature/policy_config.go index d8cc4a09b..65022430d 100644 --- a/vendor/github.com/containers/image/v5/signature/policy_config.go +++ b/vendor/github.com/containers/image/v5/signature/policy_config.go @@ -59,10 +59,16 @@ func DefaultPolicy(sys *types.SystemContext) (*Policy, error) {
 // defaultPolicyPath returns a path to the default policy of the system.
 func defaultPolicyPath(sys *types.SystemContext) string {
+ return defaultPolicyPathWithHomeDir(sys, homedir.Get())
+}
+
+// defaultPolicyPathWithHomeDir is an internal implementation detail of defaultPolicyPath,
+// it exists only to allow testing it with an artificial home directory.
+func defaultPolicyPathWithHomeDir(sys *types.SystemContext, homeDir string) string {
 if sys != nil && sys.SignaturePolicyPath != "" {
 return sys.SignaturePolicyPath
 }
- userPolicyFilePath := filepath.Join(homedir.Get(), userPolicyFile)
+ userPolicyFilePath := filepath.Join(homeDir, userPolicyFile)
 if _, err := os.Stat(userPolicyFilePath); err == nil {
 return userPolicyFilePath
 }
diff --git a/vendor/github.com/containers/image/v5/storage/storage_image.go b/vendor/github.com/containers/image/v5/storage/storage_image.go index d24f8bbee..924d684ae 100644 --- a/vendor/github.com/containers/image/v5/storage/storage_image.go +++ b/vendor/github.com/containers/image/v5/storage/storage_image.go @@ -463,7 +463,9 @@ func (s *storageImageDestination) PutBlob(ctx context.Context, stream io.Reader,
 // (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
 // info.Digest must not be empty.
 // If canSubstitute, TryReusingBlob can use an equivalent of the desired blob; in that case the returned info may not match the input.
-// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size.
+// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size, and may
+// include CompressionOperation and CompressionAlgorithm fields to indicate that a change to the compression type should be
+// reflected in the manifest that will be written.
 // If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
 // May use and/or update cache.
 func (s *storageImageDestination) TryReusingBlob(ctx context.Context, blobinfo types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) {
diff --git a/vendor/github.com/containers/image/v5/tarball/doc.go b/vendor/github.com/containers/image/v5/tarball/doc.go index ead2d4263..e9d321b8f 100644 --- a/vendor/github.com/containers/image/v5/tarball/doc.go +++ b/vendor/github.com/containers/image/v5/tarball/doc.go @@ -5,11 +5,13 @@
 // package main
 //
 // import (
-// "fmt"
+// "context"
 //
 // cp "github.com/containers/image/v5/copy"
+// "github.com/containers/image/v5/signature"
 // "github.com/containers/image/v5/tarball"
 // "github.com/containers/image/v5/transports/alltransports"
+// "github.com/containers/image/v5/types"
 // imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
 // )
 // @@ -39,7 +41,18 @@
 // if err != nil {
 // panic(err)
 // }
-// err = cp.Image(nil, dest, src, nil)
+//
+// policy, err := signature.DefaultPolicy(nil)
+// if err != nil {
+// panic(err)
+// }
+//
+// pc, err := signature.NewPolicyContext(policy)
+// if err != nil {
+// panic(err)
+// }
+// defer pc.Destroy()
+// _, err = cp.Image(context.TODO(), pc, dest, src, nil)
 // if err != nil {
 // panic(err)
 // }
diff --git a/vendor/github.com/containers/image/v5/types/types.go b/vendor/github.com/containers/image/v5/types/types.go index 3c5126b4e..8655ca443 100644 --- a/vendor/github.com/containers/image/v5/types/types.go +++ b/vendor/github.com/containers/image/v5/types/types.go @@ -126,14 +126,18 @@ type BlobInfo struct {
 Annotations map[string]string
 MediaType string
 // CompressionOperation is used in Image.UpdateLayerInfos to instruct
- // whether the original layer should be preserved or (de)compressed. The
- // field defaults to preserve the original layer.
+ // whether the original layer's "compressed or not" should be preserved,
+ // possibly while changing the compression algorithm from one to another,
+ // or if it should be compressed or decompressed. The field defaults to
+ // preserve the original layer's compressedness.
 // TODO: To remove together with CryptoOperation in re-design to remove
 // field out of BlobInfo.
 CompressionOperation LayerCompression
 // CompressionAlgorithm is used in Image.UpdateLayerInfos to set the correct
 // MIME type for compressed layers (e.g., gzip or zstd). This field MUST be
- // set when `CompressionOperation == Compress`.
+ // set when `CompressionOperation == Compress` and MAY be set when
+ // `CompressionOperation == PreserveOriginal` and the compression type is
+ // being changed for an already-compressed layer.
CompressionAlgorithm *compression.Algorithm
 // CryptoOperation is used in Image.UpdateLayerInfos to instruct
 // whether the original layer was encrypted/decrypted @@ -194,6 +198,9 @@ type BICReplacementCandidate struct {
 //
 // None of the methods return an error indication: errors when neither reading from, nor writing to, the cache, should be fatal;
 // users of the cache should just fall back to copying the blobs the usual way.
+//
+// The BlobInfoCache interface is deprecated. Consumers of this library should use one of the implementations provided by
+// subpackages of the library's "pkg/blobinfocache" package in preference to implementing the interface on their own.
 type BlobInfoCache interface {
 // UncompressedDigest returns an uncompressed digest corresponding to anyDigest.
 // May return anyDigest if it is known to be uncompressed. @@ -306,7 +313,9 @@ type ImageDestination interface {
 // (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
 // info.Digest must not be empty.
 // If canSubstitute, TryReusingBlob can use an equivalent of the desired blob; in that case the returned info may not match the input.
- // If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size.
+ // If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size, and may
+ // include CompressionOperation and CompressionAlgorithm fields to indicate that a change to the compression type should be
+ // reflected in the manifest that will be written.
 // If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
 // May use and/or update cache.
 TryReusingBlob(ctx context.Context, info BlobInfo, cache BlobInfoCache, canSubstitute bool) (bool, BlobInfo, error) @@ -397,6 +406,12 @@ type Image interface {
 // UpdatedImage returns a types.Image modified according to options.
 // Everything in options.InformationOnly should be provided, other fields should be set only if a modification is desired.
 // This does not change the state of the original Image object.
+ // The returned error will be a manifest.ManifestLayerCompressionIncompatibilityError if
+ // manifests of type options.ManifestMIMEType can not include layers that are compressed
+ // in accordance with the CompressionOperation and CompressionAlgorithm specified in one
+ // or more options.LayerInfos items, though retrying with a different
+ // options.ManifestMIMEType or with different CompressionOperation+CompressionAlgorithm
+ // values might succeed.
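That UpdatedImage documentation implies a retry loop in callers. A sketch of one, under two assumptions this diff does not guarantee: that manifest.ManifestLayerCompressionIncompatibilityError matches via errors.As by value, and that the OCI manifest type is an acceptable fallback for the caller:

```go
package retrysketch

import (
	"context"
	"errors"

	"github.com/containers/image/v5/manifest"
	"github.com/containers/image/v5/types"
	imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
)

// updateWithFallback retries a manifest update with a more expressive
// manifest type when the first choice cannot represent the requested
// layer compression.
func updateWithFallback(ctx context.Context, img types.Image, opts types.ManifestUpdateOptions) (types.Image, error) {
	updated, err := img.UpdatedImage(ctx, opts)
	var incompat manifest.ManifestLayerCompressionIncompatibilityError
	if errors.As(err, &incompat) {
		opts.ManifestMIMEType = imgspecv1.MediaTypeImageManifest
		return img.UpdatedImage(ctx, opts)
	}
	return updated, err
}
```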
UpdatedImage(ctx context.Context, options ManifestUpdateOptions) (Image, error)
 // SupportsEncryption returns an indicator that the image supports encryption
 // @@ -600,6 +615,8 @@ type SystemContext struct {
 DockerDisableV1Ping bool
 // If true, dockerImageDestination.SupportedManifestMIMETypes will omit the Schema1 media types from the supported list
 DockerDisableDestSchema1MIMETypes bool
+ // If true, the physical pull source of docker transport images is logged at info level
+ DockerLogMirrorChoice bool
 // Directory to use for OSTree temporary files
 OSTreeTmpDirPath string
diff --git a/vendor/github.com/containers/image/v5/version/version.go b/vendor/github.com/containers/image/v5/version/version.go index 48ecf938c..e8733f8c9 100644 --- a/vendor/github.com/containers/image/v5/version/version.go +++ b/vendor/github.com/containers/image/v5/version/version.go @@ -6,7 +6,7 @@ const (
 // VersionMajor is for API-incompatible changes
 VersionMajor = 5
 // VersionMinor is for functionality in a backwards-compatible manner
- VersionMinor = 9
+ VersionMinor = 10
 // VersionPatch is for backwards-compatible bug fixes
 VersionPatch = 0
diff --git a/vendor/github.com/klauspost/compress/flate/gen_inflate.go b/vendor/github.com/klauspost/compress/flate/gen_inflate.go deleted file mode 100644 index 35fc072a3..000000000 --- a/vendor/github.com/klauspost/compress/flate/gen_inflate.go +++ /dev/null @@ -1,294 +0,0 @@
-// +build generate
-
-//go:generate go run $GOFILE && gofmt -w inflate_gen.go
-
-package main
-
-import (
- "os"
- "strings"
-)
-
-func main() {
- f, err := os.Create("inflate_gen.go")
- if err != nil {
- panic(err)
- }
- defer f.Close()
- types := []string{"*bytes.Buffer", "*bytes.Reader", "*bufio.Reader", "*strings.Reader"}
- names := []string{"BytesBuffer", "BytesReader", "BufioReader", "StringsReader"}
- imports := []string{"bytes", "bufio", "io", "strings", "math/bits"}
- f.WriteString(`// Code generated by go generate gen_inflate.go. DO NOT EDIT.
-
-package flate
-
-import (
-`)
-
- for _, imp := range imports {
- f.WriteString("\t\"" + imp + "\"\n")
- }
- f.WriteString(")\n\n")
-
- template := `
-
-// Decode a single Huffman block from f.
-// hl and hd are the Huffman states for the lit/length values
-// and the distance values, respectively. If hd == nil, using the
-// fixed distance encoding associated with fixed Huffman blocks.
-func (f *decompressor) $FUNCNAME$() {
- const (
- stateInit = iota // Zero value must be stateInit
- stateDict
- )
- fr := f.r.($TYPE$)
-
- switch f.stepState {
- case stateInit:
- goto readLiteral
- case stateDict:
- goto copyHistory
- }
-
-readLiteral:
- // Read literal and/or (length, distance) according to RFC section 3.2.3.
- {
- var v int
- {
- // Inlined v, err := f.huffSym(f.hl)
- // Since a huffmanDecoder can be empty or be composed of a degenerate tree
- // with single element, huffSym must error on these two edge cases. In both
- // cases, the chunks slice will be 0 for the invalid sequence, leading it
- // satisfy the n == 0 check below.
- n := uint(f.hl.maxRead)
- // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers,
- // but is smart enough to keep local variables in registers, so use nb and b,
- // inline call to moreBits and reassign b,nb back to f on return.
- nb, b := f.nb, f.b
- for {
- for nb < n {
- c, err := fr.ReadByte()
- if err != nil {
- f.b = b
- f.nb = nb
- f.err = noEOF(err)
- return
- }
- f.roffset++
- b |= uint32(c) << (nb & regSizeMaskUint32)
- nb += 8
- }
- chunk := f.hl.chunks[b&(huffmanNumChunks-1)]
- n = uint(chunk & huffmanCountMask)
- if n > huffmanChunkBits {
- chunk = f.hl.links[chunk>>huffmanValueShift][(b>>huffmanChunkBits)&f.hl.linkMask]
- n = uint(chunk & huffmanCountMask)
- }
- if n <= nb {
- if n == 0 {
- f.b = b
- f.nb = nb
- if debugDecode {
- fmt.Println("huffsym: n==0")
- }
- f.err = CorruptInputError(f.roffset)
- return
- }
- f.b = b >> (n & regSizeMaskUint32)
- f.nb = nb - n
- v = int(chunk >> huffmanValueShift)
- break
- }
- }
- }
-
- var length int
- switch {
- case v < 256:
- f.dict.writeByte(byte(v))
- if f.dict.availWrite() == 0 {
- f.toRead = f.dict.readFlush()
- f.step = (*decompressor).$FUNCNAME$
- f.stepState = stateInit
- return
- }
- goto readLiteral
- case v == 256:
- f.finishBlock()
- return
- // otherwise, reference to older data
- case v < 265:
- length = v - (257 - 3)
- case v < maxNumLit:
- val := decCodeToLen[(v - 257)]
- length = int(val.length) + 3
- n := uint(val.extra)
- for f.nb < n {
- c, err := fr.ReadByte()
- if err != nil {
- if debugDecode {
- fmt.Println("morebits n>0:", err)
- }
- f.err = err
- return
- }
- f.roffset++
- f.b |= uint32(c) << f.nb
- f.nb += 8
- }
- length += int(f.b & uint32(1<<(n&regSizeMaskUint32)-1))
- f.b >>= n & regSizeMaskUint32
- f.nb -= n
- default:
- if debugDecode {
- fmt.Println(v, ">= maxNumLit")
- }
- f.err = CorruptInputError(f.roffset)
- return
- }
-
- var dist uint32
- if f.hd == nil {
- for f.nb < 5 {
- c, err := fr.ReadByte()
- if err != nil {
- if debugDecode {
- fmt.Println("morebits f.nb<5:", err)
- }
- f.err = err
- return
- }
- f.roffset++
- f.b |= uint32(c) << f.nb
- f.nb += 8
- }
- dist = uint32(bits.Reverse8(uint8(f.b & 0x1F << 3)))
- f.b >>= 5
- f.nb -= 5
- } else {
- // Since a huffmanDecoder can be empty or be composed of a degenerate tree
- // with single element, huffSym must error on these two edge cases. In both
- // cases, the chunks slice will be 0 for the invalid sequence, leading it
- // satisfy the n == 0 check below.
- n := uint(f.hd.maxRead)
- // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers,
- // but is smart enough to keep local variables in registers, so use nb and b,
- // inline call to moreBits and reassign b,nb back to f on return.
- nb, b := f.nb, f.b
- for {
- for nb < n {
- c, err := fr.ReadByte()
- if err != nil {
- f.b = b
- f.nb = nb
- f.err = noEOF(err)
- return
- }
- f.roffset++
- b |= uint32(c) << (nb & regSizeMaskUint32)
- nb += 8
- }
- chunk := f.hd.chunks[b&(huffmanNumChunks-1)]
- n = uint(chunk & huffmanCountMask)
- if n > huffmanChunkBits {
- chunk = f.hd.links[chunk>>huffmanValueShift][(b>>huffmanChunkBits)&f.hd.linkMask]
- n = uint(chunk & huffmanCountMask)
- }
- if n <= nb {
- if n == 0 {
- f.b = b
- f.nb = nb
- if debugDecode {
- fmt.Println("huffsym: n==0")
- }
- f.err = CorruptInputError(f.roffset)
- return
- }
- f.b = b >> (n & regSizeMaskUint32)
- f.nb = nb - n
- dist = uint32(chunk >> huffmanValueShift)
- break
- }
- }
- }
-
- switch {
- case dist < 4:
- dist++
- case dist < maxNumDist:
- nb := uint(dist-2) >> 1
- // have 1 bit in bottom of dist, need nb more.
- extra := (dist & 1) << (nb & regSizeMaskUint32)
- for f.nb < nb {
- c, err := fr.ReadByte()
- if err != nil {
- if debugDecode {
- fmt.Println("morebits f.nb<nb:", err)
- }
- f.err = err
- return
- }
- f.roffset++
- f.b |= uint32(c) << f.nb
- f.nb += 8
- }
- extra |= f.b & uint32(1<<(nb&regSizeMaskUint32)-1)
- f.b >>= nb & regSizeMaskUint32
- f.nb -= nb
- dist = 1<<((nb+1)&regSizeMaskUint32) + 1 + extra
- default:
- if debugDecode {
- fmt.Println("dist too big:", dist, maxNumDist)
- }
- f.err = CorruptInputError(f.roffset)
- return
- }
-
- // No check on length; encoding can be prescient.
- if dist > uint32(f.dict.histSize()) {
- if debugDecode {
- fmt.Println("dist > f.dict.histSize():", dist, f.dict.histSize())
- }
- f.err = CorruptInputError(f.roffset)
- return
- }
-
- f.copyLen, f.copyDist = length, int(dist)
- goto copyHistory
- }
-
-copyHistory:
- // Perform a backwards copy according to RFC section 3.2.3.
- {
- cnt := f.dict.tryWriteCopy(f.copyDist, f.copyLen)
- if cnt == 0 {
- cnt = f.dict.writeCopy(f.copyDist, f.copyLen)
- }
- f.copyLen -= cnt
-
- if f.dict.availWrite() == 0 || f.copyLen > 0 {
- f.toRead = f.dict.readFlush()
- f.step = (*decompressor).$FUNCNAME$ // We need to continue this work
- f.stepState = stateDict
- return
- }
- goto readLiteral
- }
-}
-
-`
- for i, t := range types {
- s := strings.Replace(template, "$FUNCNAME$", "huffman"+names[i], -1)
- s = strings.Replace(s, "$TYPE$", t, -1)
- f.WriteString(s)
- }
- f.WriteString("func (f *decompressor) huffmanBlockDecoder() func() {\n")
- f.WriteString("\tswitch f.r.(type) {\n")
- for i, t := range types {
- f.WriteString("\t\tcase " + t + ":\n")
- f.WriteString("\t\t\treturn f.huffman" + names[i] + "\n")
- }
- f.WriteString("\t\tdefault:\n")
- f.WriteString("\t\t\treturn f.huffmanBlockGeneric")
- f.WriteString("\t}\n}\n")
-}
diff --git a/vendor/github.com/klauspost/compress/huff0/README.md b/vendor/github.com/klauspost/compress/huff0/README.md index e12da4db2..8b6e5c663 100644 --- a/vendor/github.com/klauspost/compress/huff0/README.md +++ b/vendor/github.com/klauspost/compress/huff0/README.md @@ -14,7 +14,9 @@ but it can be used as a secondary step to compressors (like Snappy) that does no
 ## News
- * Mar 2018: First implementation released. Consider this beta software for now.
+This is used as part of the [zstandard](https://github.com/klauspost/compress/tree/master/zstd#zstd) compression and decompression package.
+
+This ensures that most functionality is well tested.
 # Usage
diff --git a/vendor/github.com/klauspost/compress/zstd/decoder.go b/vendor/github.com/klauspost/compress/zstd/decoder.go index 62fd37324..1d41c25d2 100644 --- a/vendor/github.com/klauspost/compress/zstd/decoder.go +++ b/vendor/github.com/klauspost/compress/zstd/decoder.go @@ -5,7 +5,6 @@ package zstd
 import (
- "bytes"
 "errors"
 "io"
 "sync" @@ -179,11 +178,13 @@ func (d *Decoder) Reset(r io.Reader) error {
 }
 // If bytes buffer and < 1MB, do sync decoding anyway.
- if bb, ok := r.(*bytes.Buffer); ok && bb.Len() < 1<<20 {
+ if bb, ok := r.(byter); ok && bb.Len() < 1<<20 {
+ var bb2 byter
+ bb2 = bb
 if debug {
 println("*bytes.Buffer detected, doing sync decode, len:", bb.Len())
 }
- b := bb.Bytes()
+ b := bb2.Bytes()
 var dst []byte
 if cap(d.current.b) > 0 {
 dst = d.current.b
diff --git a/vendor/github.com/klauspost/compress/zstd/zstd.go b/vendor/github.com/klauspost/compress/zstd/zstd.go index 0c761dd62..9056beef2 100644 --- a/vendor/github.com/klauspost/compress/zstd/zstd.go +++ b/vendor/github.com/klauspost/compress/zstd/zstd.go @@ -4,6 +4,7 @@ package zstd
 import (
+ "bytes"
 "errors"
 "log"
 "math" @@ -146,3 +147,10 @@ func load64(b []byte, i int) uint64 {
 return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
 }
+
+type byter interface {
+ Bytes() []byte
+ Len() int
+}
+
+var _ byter = &bytes.Buffer{}
diff --git a/vendor/github.com/ulikunitz/xz/SECURITY.md b/vendor/github.com/ulikunitz/xz/SECURITY.md new file mode 100644 index 000000000..5f7ec01b3 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/SECURITY.md @@ -0,0 +1,10 @@
+# Security Policy
+
+## Supported Versions
+
+Currently the last minor version v0.5.x is supported.
+
+## Reporting a Vulnerability
+
+Report a vulnerability by creating a Github issue at
+<https://github.com/ulikunitz/xz/issues>. Expect a response in a week.
diff --git a/vendor/github.com/ulikunitz/xz/TODO.md b/vendor/github.com/ulikunitz/xz/TODO.md index 84bd5dcbd..88c7341c8 100644 --- a/vendor/github.com/ulikunitz/xz/TODO.md +++ b/vendor/github.com/ulikunitz/xz/TODO.md @@ -8,19 +8,17 @@
 1. Review encoder and check for lzma improvements under xz.
 2. Fix binary tree matcher.
-3. Compare compression ratio with xz tool using comparable parameters
- and optimize parameters
-4. Do some optimizations
- - rename operation action and make it a simple type of size 8
- - make maxMatches, wordSize parameters
- - stop searching after a certain length is found (parameter sweetLen)
+3. Compare compression ratio with xz tool using comparable parameters and optimize parameters
+4. rename operation action and make it a simple type of size 8
+5. make maxMatches, wordSize parameters
+6. stop searching after a certain length is found (parameter sweetLen)
 ## Release v0.7
 1. Optimize code
 2. Do statistical analysis to get linear presets.
 3. Test sync.Pool compatibility for xz and lzma Writer and Reader
-3. Fuzz optimized code.
+4. Fuzz optimized code.
 ## Release v0.8 @@ -44,53 +42,73 @@
 ## Package lzma
-### Release v0.6
-
-- Rewrite Encoder into a simple greedy one-op-at-a-time encoder
- including
- + simple scan at the dictionary head for the same byte
- + use the killer byte (requiring matches to get longer, the first
- test should be the byte that would make the match longer)
+### v0.6
+* Rewrite Encoder into a simple greedy one-op-at-a-time encoder including
+ * simple scan at the dictionary head for the same byte
+ * use the killer byte (requiring matches to get longer, the first test should be the byte that would make the match longer)
 ## Optimizations
-- There may be a lot of false sharing in lzma.State; check whether this
- can be improved by reorganizing the internal structure of it.
-- Check whether batching encoding and decoding improves speed.
+* There may be a lot of false sharing in lzma.State; check whether this can be improved by reorganizing the internal structure of it.
+
+* Check whether batching encoding and decoding improves speed.
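Stepping back to the zstd change above: swapping the concrete *bytes.Buffer assertion for the small byter interface widens the synchronous fast path to any reader that can expose its bytes, and the var _ line turns interface satisfaction into a compile-time check. The idiom in isolation:

```go
package main

import (
	"bytes"
	"fmt"
	"strings"
)

// byter matches any reader that can cheaply expose its remaining bytes.
type byter interface {
	Bytes() []byte
	Len() int
}

// Compile-time proof that *bytes.Buffer satisfies byter; a build error here
// is cheaper than a runtime surprise.
var _ byter = &bytes.Buffer{}

// syncDecodable mirrors the decoder's fast path: small in-memory inputs can
// be decoded synchronously.
func syncDecodable(r interface{}) bool {
	bb, ok := r.(byter)
	return ok && bb.Len() < 1<<20
}

func main() {
	fmt.Println(syncDecodable(bytes.NewBufferString("hello"))) // true
	fmt.Println(syncDecodable(strings.NewReader("hello")))     // false: no Bytes()
}
```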
### DAG optimizations
-- Use full buffer to create minimal bit-length above range encoder.
-- Might be too slow (see v0.4)
+* Use full buffer to create minimal bit-length above range encoder.
+* Might be too slow (see v0.4)
 ### Different match finders
-- hashes with 2, 3 characters additional to 4 characters
-- binary trees with 2-7 characters (uint64 as key, use uint32 as
+* hashes with 2, 3 characters additional to 4 characters
+* binary trees with 2-7 characters (uint64 as key, use uint32 as
+ pointers into an array)
-- rb-trees with 2-7 characters (uint64 as key, use uint32 as pointers
+
+* rb-trees with 2-7 characters (uint64 as key, use uint32 as pointers
+ into an array with bit-stealing for the colors)
 ## Release Procedure
-- execute goch -l for all packages; probably with lower param like 0.5.
-- check orthography with gospell
-- Write release notes in doc/relnotes.
-- Update README.md
-- xb copyright . in xz directory to ensure all new files have Copyright
- header
-- VERSION=<version> go generate github.com/ulikunitz/xz/... to update
- version files
-- Execute test for Linux/amd64, Linux/x86 and Windows/amd64.
-- Update TODO.md - write short log entry
-- git checkout master && git merge dev
-- git tag -a <version>
-- git push
+* execute goch -l for all packages; probably with lower param like 0.5.
+* check orthography with gospell
+* Write release notes in doc/relnotes.
+* Update README.md
+* xb copyright . in xz directory to ensure all new files have Copyright header
+* `VERSION=<version> go generate github.com/ulikunitz/xz/...` to update version files
+* Execute test for Linux/amd64, Linux/x86 and Windows/amd64.
+* Update TODO.md - write short log entry
+* `git checkout master && git merge dev`
+* `git tag -a <version>`
+* `git push`
 ## Log
-## 2020-08-19
+### 2020-12-17
+
+Release v0.5.9 fixes warnings, a typo and adds SECURITY.md.
+
+One fix is interesting.
+
+```go
+const (
+ a byte = 0x1
+ b = 0x2
+)
+```
+
+The constants a and b don't have the same type. Correct is
+
+```go
+const (
+ a byte = 0x1
+ b byte = 0x2
+)
+```
+
+### 2020-08-19
 Release v0.5.8 fixes issue [issue #35](https://github.com/ulikunitz/xz/issues/35). @@ -208,8 +226,8 @@ MININT.
 ### 2015-06-04
-It has been a productive day. I improved the interface of lzma.Reader
-and lzma.Writer and fixed the error handling.
+It has been a productive day. I improved the interface of lzma. Reader
+and lzma. Writer and fixed the error handling.
 ### 2015-06-01 @@ -260,7 +278,7 @@ needed anymore.
 However I will implement a ReaderState and WriterState type to use
 static typing to ensure the right State object is combined with the
-right lzbase.Reader and lzbase.Writer.
+right lzbase. Reader and lzbase. Writer.
 As a start I have implemented ReaderState and WriterState to ensure
 that the state for reading is only used by readers and WriterState only @@ -282,11 +300,11 @@ old lzma package has been completely removed.
 ### 2015-04-05
-Implemented lzma.Reader and tested it.
+Implemented lzma. Reader and tested it.
 ### 2015-04-04
-Implemented baseReader by adapting code form lzma.Reader.
+Implemented baseReader by adapting code form lzma. Reader.
 ### 2015-04-03 @@ -302,7 +320,7 @@
 However in Francesco Campoy's presentation "Go for Javaneros
 (Javaïstes?)" is the idea that using an embedded field E, all the
 methods of E will be defined on T. If E is an interface, T satisfies E.
-https://talks.golang.org/2014/go4java.slide#51
+<https://talks.golang.org/2014/go4java.slide#51>
 I have never used this, but it seems to be a cool idea. @@ -327,11 +345,11 @@ and the opCodec.
 1. Implemented simple lzmago tool
 2.
Tested tool against large 4.4G file - - compression worked correctly; tested decompression with lzma - - decompression hits a full buffer condition + * compression worked correctly; tested decompression with lzma + * decompression hits a full buffer condition 3. Fixed a bug in the compressor and wrote a test for it 4. Executed full cycle for 4.4 GB file; performance can be improved ;-) ### 2015-01-11 -- Release v0.2 because of the working LZMA encoder and decoder +* Release v0.2 because of the working LZMA encoder and decoder diff --git a/vendor/github.com/ulikunitz/xz/format.go b/vendor/github.com/ulikunitz/xz/format.go index edfec9a94..84b58c9dd 100644 --- a/vendor/github.com/ulikunitz/xz/format.go +++ b/vendor/github.com/ulikunitz/xz/format.go @@ -47,9 +47,9 @@ const HeaderLen = 12 // Constants for the checksum methods supported by xz. const ( None byte = 0x0 - CRC32 = 0x1 - CRC64 = 0x4 - SHA256 = 0xa + CRC32 byte = 0x1 + CRC64 byte = 0x4 + SHA256 byte = 0xa ) // errInvalidFlags indicates that flags are invalid. @@ -569,22 +569,6 @@ func readFilters(r io.Reader, count int) (filters []filter, err error) { return []filter{f}, err } -// writeFilters writes the filters. -func writeFilters(w io.Writer, filters []filter) (n int, err error) { - for _, f := range filters { - p, err := f.MarshalBinary() - if err != nil { - return n, err - } - k, err := w.Write(p) - n += k - if err != nil { - return n, err - } - } - return n, nil -} - /*** Index ***/ // record describes a block in the xz file index. diff --git a/vendor/github.com/ulikunitz/xz/lzma/bintree.go b/vendor/github.com/ulikunitz/xz/lzma/bintree.go index 58d6a92a7..527ea19a7 100644 --- a/vendor/github.com/ulikunitz/xz/lzma/bintree.go +++ b/vendor/github.com/ulikunitz/xz/lzma/bintree.go @@ -5,10 +5,7 @@ package lzma import ( - "bufio" "errors" - "fmt" - "io" "unicode" ) @@ -349,6 +346,7 @@ func dumpX(x uint32) string { return string(a) } +/* // dumpNode writes a representation of the node v into the io.Writer. func (t *binTree) dumpNode(w io.Writer, v uint32, indent int) { if v == null { @@ -377,6 +375,7 @@ func (t *binTree) dump(w io.Writer) error { t.dumpNode(bw, t.root, 0) return bw.Flush() } +*/ func (t *binTree) distance(v uint32) int { dist := int(t.front) - int(v) diff --git a/vendor/github.com/ulikunitz/xz/lzma/bitops.go b/vendor/github.com/ulikunitz/xz/lzma/bitops.go index 2784ec6ba..d4309f97e 100644 --- a/vendor/github.com/ulikunitz/xz/lzma/bitops.go +++ b/vendor/github.com/ulikunitz/xz/lzma/bitops.go @@ -18,6 +18,7 @@ var ntz32Table = [32]int8{ 30, 17, 8, 14, 29, 13, 28, 27, } +/* // ntz32 computes the number of trailing zeros for an unsigned 32-bit integer. func ntz32(x uint32) int { if x == 0 { @@ -26,6 +27,7 @@ func ntz32(x uint32) int { x = (x & -x) * ntz32Const return int(ntz32Table[x>>27]) } +*/ // nlz32 computes the number of leading zeros for an unsigned 32-bit integer. 
func nlz32(x uint32) int { diff --git a/vendor/github.com/ulikunitz/xz/lzma/decoder.go b/vendor/github.com/ulikunitz/xz/lzma/decoder.go index e5a760a50..4b820792a 100644 --- a/vendor/github.com/ulikunitz/xz/lzma/decoder.go +++ b/vendor/github.com/ulikunitz/xz/lzma/decoder.go @@ -200,7 +200,7 @@ func (d *decoder) decompress() error { op, err := d.readOp() switch err { case nil: - break + // break case errEOS: d.eos = true if !d.rd.possiblyAtEnd() { diff --git a/vendor/github.com/ulikunitz/xz/lzma/decoderdict.go b/vendor/github.com/ulikunitz/xz/lzma/decoderdict.go index ba06712b0..dd44e6625 100644 --- a/vendor/github.com/ulikunitz/xz/lzma/decoderdict.go +++ b/vendor/github.com/ulikunitz/xz/lzma/decoderdict.go @@ -126,10 +126,3 @@ func (d *decoderDict) Available() int { return d.buf.Available() } // Read reads data from the buffer contained in the decoder dictionary. func (d *decoderDict) Read(p []byte) (n int, err error) { return d.buf.Read(p) } - -// Buffered returns the number of bytes currently buffered in the -// decoder dictionary. -func (d *decoderDict) buffered() int { return d.buf.Buffered() } - -// Peek gets data from the buffer without advancing the rear index. -func (d *decoderDict) peek(p []byte) (n int, err error) { return d.buf.Peek(p) } diff --git a/vendor/github.com/ulikunitz/xz/lzma/directcodec.go b/vendor/github.com/ulikunitz/xz/lzma/directcodec.go index e6e0c6ddf..064642831 100644 --- a/vendor/github.com/ulikunitz/xz/lzma/directcodec.go +++ b/vendor/github.com/ulikunitz/xz/lzma/directcodec.go @@ -4,21 +4,10 @@ package lzma -import "fmt" - // directCodec allows the encoding and decoding of values with a fixed number // of bits. The number of bits must be in the range [1,32]. type directCodec byte -// makeDirectCodec creates a directCodec. The function panics if the number of -// bits is not in the range [1,32]. -func makeDirectCodec(bits int) directCodec { - if !(1 <= bits && bits <= 32) { - panic(fmt.Errorf("bits=%d out of range", bits)) - } - return directCodec(bits) -} - // Bits returns the number of bits supported by this codec. func (dc directCodec) Bits() int { return int(dc) diff --git a/vendor/github.com/ulikunitz/xz/lzma/distcodec.go b/vendor/github.com/ulikunitz/xz/lzma/distcodec.go index 69871c04a..9ed486d27 100644 --- a/vendor/github.com/ulikunitz/xz/lzma/distcodec.go +++ b/vendor/github.com/ulikunitz/xz/lzma/distcodec.go @@ -20,8 +20,6 @@ const ( posSlotBits = 6 // number of align bits alignBits = 4 - // maximum position slot - maxPosSlot = 63 ) // distCodec provides encoding and decoding of distance values. @@ -45,20 +43,6 @@ func (dc *distCodec) deepcopy(src *distCodec) { dc.alignCodec.deepcopy(&src.alignCodec) } -// distBits returns the number of bits required to encode dist. -func distBits(dist uint32) int { - if dist < startPosModel { - return 6 - } - // slot s > 3, dist d - // s = 2(bits(d)-1) + bit(d, bits(d)-2) - // s>>1 = bits(d)-1 - // bits(d) = 32-nlz32(d) - // s>>1=31-nlz32(d) - // n = 5 + (s>>1) = 36 - nlz32(d) - return 36 - nlz32(dist) -} - // newDistCodec creates a new distance codec. func (dc *distCodec) init() { for i := range dc.posSlotCodecs { diff --git a/vendor/github.com/ulikunitz/xz/lzma/encoderdict.go b/vendor/github.com/ulikunitz/xz/lzma/encoderdict.go index 40f3d3f64..c36308d7c 100644 --- a/vendor/github.com/ulikunitz/xz/lzma/encoderdict.go +++ b/vendor/github.com/ulikunitz/xz/lzma/encoderdict.go @@ -19,7 +19,7 @@ type matcher interface { } // encoderDict provides the dictionary of the encoder. 
It includes an
-// addtional buffer atop of the actual dictionary.
+// additional buffer atop of the actual dictionary.
 type encoderDict struct {
 buf buffer
 m matcher
diff --git a/vendor/github.com/ulikunitz/xz/lzma/header2.go b/vendor/github.com/ulikunitz/xz/lzma/header2.go index cd148812c..ffeca35c3 100644 --- a/vendor/github.com/ulikunitz/xz/lzma/header2.go +++ b/vendor/github.com/ulikunitz/xz/lzma/header2.go @@ -264,7 +264,7 @@ type chunkState byte
 // state
 const (
 start chunkState = 'S'
- stop = 'T'
+ stop chunkState = 'T'
 )
 // errors for the chunk state handling
diff --git a/vendor/github.com/ulikunitz/xz/lzma/lengthcodec.go b/vendor/github.com/ulikunitz/xz/lzma/lengthcodec.go index 927395bd8..35b064064 100644 --- a/vendor/github.com/ulikunitz/xz/lzma/lengthcodec.go +++ b/vendor/github.com/ulikunitz/xz/lzma/lengthcodec.go @@ -56,19 +56,6 @@ func (lc *lengthCodec) init() {
 lc.high = makeTreeCodec(8)
 }
-// lBits gives the number of bits used for the encoding of the l value
-// provided to the range encoder.
-func lBits(l uint32) int {
- switch {
- case l < 8:
- return 4
- case l < 16:
- return 5
- default:
- return 10
- }
-}
-
 // Encode encodes the length offset. The length offset l can be computed by
 // subtracting minMatchLen (2) from the actual length.
 //
diff --git a/vendor/github.com/ulikunitz/xz/lzma/literalcodec.go b/vendor/github.com/ulikunitz/xz/lzma/literalcodec.go index ca31530fd..7b1ad1d9b 100644 --- a/vendor/github.com/ulikunitz/xz/lzma/literalcodec.go +++ b/vendor/github.com/ulikunitz/xz/lzma/literalcodec.go @@ -123,10 +123,3 @@ const (
 minLP = 0
 maxLP = 4
 )
-
-// minState and maxState define a range for the state values stored in
-// the State values.
-const (
- minState = 0
- maxState = 11
-)
diff --git a/vendor/github.com/ulikunitz/xz/lzma/operation.go b/vendor/github.com/ulikunitz/xz/lzma/operation.go index a75c9b46c..2f9b78ea5 100644 --- a/vendor/github.com/ulikunitz/xz/lzma/operation.go +++ b/vendor/github.com/ulikunitz/xz/lzma/operation.go @@ -5,7 +5,6 @@ package lzma
 import (
- "errors"
 "fmt"
 "unicode"
 ) @@ -24,30 +23,6 @@ type match struct {
 n int
 }
-// verify checks whether the match is valid. If that is not the case an
-// error is returned.
-func (m match) verify() error {
- if !(minDistance <= m.distance && m.distance <= maxDistance) {
- return errors.New("distance out of range")
- }
- if !(1 <= m.n && m.n <= maxMatchLen) {
- return errors.New("length out of range")
- }
- return nil
-}
-
-// l return the l-value for the match, which is the difference of length
-// n and 2.
-func (m match) l() uint32 {
- return uint32(m.n - minMatchLen)
-}
-
-// dist returns the dist value for the match, which is one less of the
-// distance stored in the match.
-func (m match) dist() uint32 {
- return uint32(m.distance - minDistance)
-}
-
 // Len returns the number of bytes matched.
 func (m match) Len() int {
 return m.n
diff --git a/vendor/github.com/ulikunitz/xz/lzma/rangecodec.go b/vendor/github.com/ulikunitz/xz/lzma/rangecodec.go index 7189a0377..7b299abfe 100644 --- a/vendor/github.com/ulikunitz/xz/lzma/rangecodec.go +++ b/vendor/github.com/ulikunitz/xz/lzma/rangecodec.go @@ -131,32 +131,6 @@ type rangeDecoder struct {
 code uint32
 }
-// init initializes the range decoder, by reading from the byte reader.
-func (d *rangeDecoder) init() error { - d.nrange = 0xffffffff - d.code = 0 - - b, err := d.br.ReadByte() - if err != nil { - return err - } - if b != 0 { - return errors.New("newRangeDecoder: first byte not zero") - } - - for i := 0; i < 4; i++ { - if err = d.updateCode(); err != nil { - return err - } - } - - if d.code >= d.nrange { - return errors.New("newRangeDecoder: d.code >= d.nrange") - } - - return nil -} - // newRangeDecoder initializes a range decoder. It reads five bytes from the // reader and therefore may return an error. func newRangeDecoder(br io.ByteReader) (d *rangeDecoder, err error) { diff --git a/vendor/github.com/ulikunitz/xz/lzma/reader2.go b/vendor/github.com/ulikunitz/xz/lzma/reader2.go index 33074e624..e34c23f9c 100644 --- a/vendor/github.com/ulikunitz/xz/lzma/reader2.go +++ b/vendor/github.com/ulikunitz/xz/lzma/reader2.go @@ -48,7 +48,6 @@ type Reader2 struct { chunkReader io.Reader cstate chunkState - ctype chunkType } // NewReader2 creates a reader for an LZMA2 chunk sequence. diff --git a/vendor/github.com/ulikunitz/xz/lzma/state.go b/vendor/github.com/ulikunitz/xz/lzma/state.go index 03f061cf1..fbe3a3942 100644 --- a/vendor/github.com/ulikunitz/xz/lzma/state.go +++ b/vendor/github.com/ulikunitz/xz/lzma/state.go @@ -53,12 +53,6 @@ func (s *state) Reset() { s.distCodec.init() } -// initState initializes the state. -func initState(s *state, p Properties) { - *s = state{Properties: p} - s.Reset() -} - // newState creates a new state from the give Properties. func newState(p Properties) *state { s := &state{Properties: p} diff --git a/vendor/github.com/ulikunitz/xz/reader.go b/vendor/github.com/ulikunitz/xz/reader.go index 22cd6d500..795858914 100644 --- a/vendor/github.com/ulikunitz/xz/reader.go +++ b/vendor/github.com/ulikunitz/xz/reader.go @@ -26,13 +26,6 @@ type ReaderConfig struct { SingleStream bool } -// fill replaces all zero values with their default values. -func (c *ReaderConfig) fill() { - if c.DictCap == 0 { - c.DictCap = 8 * 1024 * 1024 - } -} - // Verify checks the reader parameters for Validity. Zero values will be // replaced by default values. func (c *ReaderConfig) Verify() error { @@ -165,9 +158,6 @@ func (c ReaderConfig) newStreamReader(xz io.Reader) (r *streamReader, err error) return r, nil } -// errIndex indicates an error with the xz file index. -var errIndex = errors.New("xz: error in xz file index") - // readTail reads the index body and the xz footer. func (r *streamReader) readTail() error { index, n, err := readIndexBody(r.xz) @@ -265,7 +255,6 @@ type blockReader struct { n int64 hash hash.Hash r io.Reader - err error } // newBlockReader creates a new block reader. @@ -315,10 +304,6 @@ func (br *blockReader) record() record { return record{br.unpaddedSize(), br.uncompressedSize()} } -// errBlockSize indicates that the size of the block in the block header -// is wrong. -var errBlockSize = errors.New("xz: wrong uncompressed size for block") - // Read reads data from the block. 
func (br *blockReader) Read(p []byte) (n int, err error) { n, err = br.r.Read(p) diff --git a/vendor/github.com/ulikunitz/xz/writer.go b/vendor/github.com/ulikunitz/xz/writer.go index aec10dfa6..a9ed44912 100644 --- a/vendor/github.com/ulikunitz/xz/writer.go +++ b/vendor/github.com/ulikunitz/xz/writer.go @@ -6,6 +6,7 @@ package xz import ( "errors" + "fmt" "hash" "io" @@ -190,6 +191,9 @@ func (c WriterConfig) NewWriter(xz io.Writer) (w *Writer, err error) { return nil, err } data, err := w.h.MarshalBinary() + if err != nil { + return nil, fmt.Errorf("w.h.MarshalBinary(): error %w", err) + } if _, err = xz.Write(data); err != nil { return nil, err } diff --git a/vendor/github.com/vbauerster/mpb/v5/.travis.yml b/vendor/github.com/vbauerster/mpb/v5/.travis.yml index 0eb0f2f20..9a203a67d 100644 --- a/vendor/github.com/vbauerster/mpb/v5/.travis.yml +++ b/vendor/github.com/vbauerster/mpb/v5/.travis.yml @@ -1,4 +1,7 @@ language: go +arch: + - amd64 + - ppc64le go: - 1.14.x diff --git a/vendor/github.com/vbauerster/mpb/v5/bar_option.go b/vendor/github.com/vbauerster/mpb/v5/bar_option.go index 31b7939b0..e7d2e41f9 100644 --- a/vendor/github.com/vbauerster/mpb/v5/bar_option.go +++ b/vendor/github.com/vbauerster/mpb/v5/bar_option.go @@ -123,13 +123,20 @@ func makeExtFunc(filler BarFiller) extFunc { } } -// TrimSpace trims bar's edge spaces. -func TrimSpace() BarOption { +// BarFillerTrim bar filler is rendered with leading and trailing space +// like ' [===] ' by default. With this option leading and trailing +// space will be removed. +func BarFillerTrim() BarOption { return func(s *bState) { s.trimSpace = true } } +// TrimSpace is an alias to BarFillerTrim. +func TrimSpace() BarOption { + return BarFillerTrim() +} + // BarStyle overrides mpb.DefaultBarStyle which is "[=>-]<+". // It's ok to pass string containing just 5 runes, for example "╢▌▌░╟", // if you don't need to override '<' (reverse tip) and '+' (refill rune). 
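Aside on the bar_option.go hunk just above: the option formerly called TrimSpace is renamed to BarFillerTrim, with TrimSpace kept as an alias so existing callers keep compiling. A minimal sketch of a caller using the renamed option; the bar total and the loop are hypothetical, while mpb.New, AddBar, Increment, and Wait are the existing mpb v5 API:

    package main

    import "github.com/vbauerster/mpb/v5"

    func main() {
        p := mpb.New()
        // BarFillerTrim drops the leading and trailing spaces that the
        // filler renders by default, so ' [===] ' becomes '[===]'.
        // TrimSpace() still compiles as an alias after this bump.
        bar := p.AddBar(100, mpb.BarFillerTrim())
        for i := 0; i < 100; i++ {
            bar.Increment()
        }
        p.Wait()
    }
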
diff --git a/vendor/github.com/vbauerster/mpb/v5/go.mod b/vendor/github.com/vbauerster/mpb/v5/go.mod index 642bf0a5a..e80d1a10d 100644 --- a/vendor/github.com/vbauerster/mpb/v5/go.mod +++ b/vendor/github.com/vbauerster/mpb/v5/go.mod @@ -4,7 +4,7 @@ require ( github.com/VividCortex/ewma v1.1.1 github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d github.com/mattn/go-runewidth v0.0.9 - golang.org/x/sys v0.0.0-20200810151505-1b9f1253b3ed + golang.org/x/sys v0.0.0-20201218084310-7d0127a74742 ) go 1.14 diff --git a/vendor/github.com/vbauerster/mpb/v5/go.sum b/vendor/github.com/vbauerster/mpb/v5/go.sum index 7ad08f141..62cc10af0 100644 --- a/vendor/github.com/vbauerster/mpb/v5/go.sum +++ b/vendor/github.com/vbauerster/mpb/v5/go.sum @@ -4,5 +4,5 @@ github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d h1:licZJFw2RwpH github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d/go.mod h1:asat636LX7Bqt5lYEZ27JNDcqxfjdBQuJ/MM4CN/Lzo= github.com/mattn/go-runewidth v0.0.9 h1:Lm995f3rfxdpd6TSmuVCHVb/QhupuXlYr8sCI/QdE+0= github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= -golang.org/x/sys v0.0.0-20200810151505-1b9f1253b3ed h1:WBkVNH1zd9jg/dK4HCM4lNANnmd12EHC9z+LmcCG4ns= -golang.org/x/sys v0.0.0-20200810151505-1b9f1253b3ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201218084310-7d0127a74742 h1:+CBz4km/0KPU3RGTwARGh/noP3bEwtHcq+0YcBQM2JQ= +golang.org/x/sys v0.0.0-20201218084310-7d0127a74742/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= diff --git a/vendor/github.com/vbauerster/mpb/v5/progress.go b/vendor/github.com/vbauerster/mpb/v5/progress.go index ac1ce50ab..fb66ce05d 100644 --- a/vendor/github.com/vbauerster/mpb/v5/progress.go +++ b/vendor/github.com/vbauerster/mpb/v5/progress.go @@ -8,6 +8,7 @@ import ( "io" "io/ioutil" "log" + "math" "os" "sync" "time" @@ -40,7 +41,6 @@ type pState struct { pMatrix map[int][]chan int aMatrix map[int][]chan int barShutdownQueue []*Bar - barPopQueue []*Bar // following are provided/overrided by user idCount int @@ -179,7 +179,7 @@ func (p *Progress) BarCount() int { } } -// Wait waits far all bars to complete and finally shutdowns container. +// Wait waits for all bars to complete and finally shutdowns container. // After this method has been called, there is no way to reuse *Progress // instance. 
func (p *Progress) Wait() { @@ -301,27 +301,18 @@ func (s *pState) flush(cw *cwriter.Writer) error { delete(s.parkedBars, b) b.toDrop = true } + if s.popCompleted && !b.noPop { + lineCount -= b.extendedLines + 1 + b.toDrop = true + } if b.toDrop { delete(bm, b) s.heapUpdated = true - } else if s.popCompleted { - if b := b; !b.noPop { - defer func() { - s.barPopQueue = append(s.barPopQueue, b) - }() - } } b.cancel() } s.barShutdownQueue = s.barShutdownQueue[0:0] - for _, b := range s.barPopQueue { - delete(bm, b) - s.heapUpdated = true - lineCount -= b.extendedLines + 1 - } - s.barPopQueue = s.barPopQueue[0:0] - for b := range bm { heap.Push(&s.bHeap, b) } @@ -370,7 +361,7 @@ func (s *pState) makeBarState(total int64, filler BarFiller, options ...BarOptio } if s.popCompleted && !bs.noPop { - bs.priority = -1 + bs.priority = -(math.MaxInt32 - s.idCount) } bs.bufP = bytes.NewBuffer(make([]byte, 0, 128)) @@ -382,17 +373,18 @@ func (s *pState) makeBarState(total int64, filler BarFiller, options ...BarOptio func syncWidth(matrix map[int][]chan int) { for _, column := range matrix { - column := column - go func() { - var maxWidth int - for _, ch := range column { - if w := <-ch; w > maxWidth { - maxWidth = w - } - } - for _, ch := range column { - ch <- maxWidth - } - }() + go maxWidthDistributor(column) + } +} + +var maxWidthDistributor = func(column []chan int) { + var maxWidth int + for _, ch := range column { + if w := <-ch; w > maxWidth { + maxWidth = w + } + } + for _, ch := range column { + ch <- maxWidth } } diff --git a/vendor/modules.txt b/vendor/modules.txt index 397ab70be..45abbe217 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -108,7 +108,7 @@ github.com/containers/common/pkg/umask github.com/containers/common/version # github.com/containers/conmon v2.0.20+incompatible github.com/containers/conmon/runner/config -# github.com/containers/image/v5 v5.9.0 +# github.com/containers/image/v5 v5.10.0 github.com/containers/image/v5/copy github.com/containers/image/v5/directory github.com/containers/image/v5/directory/explicitfilepath @@ -120,6 +120,7 @@ github.com/containers/image/v5/docker/policyconfiguration github.com/containers/image/v5/docker/reference github.com/containers/image/v5/docker/tarfile github.com/containers/image/v5/image +github.com/containers/image/v5/internal/blobinfocache github.com/containers/image/v5/internal/iolimits github.com/containers/image/v5/internal/pkg/keyctl github.com/containers/image/v5/internal/pkg/platform @@ -348,7 +349,7 @@ github.com/json-iterator/go # github.com/juju/ansiterm v0.0.0-20180109212912-720a0952cc2a github.com/juju/ansiterm github.com/juju/ansiterm/tabwriter -# github.com/klauspost/compress v1.11.5 +# github.com/klauspost/compress v1.11.7 github.com/klauspost/compress/flate github.com/klauspost/compress/fse github.com/klauspost/compress/huff0 @@ -553,7 +554,7 @@ github.com/uber/jaeger-client-go/transport github.com/uber/jaeger-client-go/utils # github.com/uber/jaeger-lib v2.2.0+incompatible github.com/uber/jaeger-lib/metrics -# github.com/ulikunitz/xz v0.5.8 +# github.com/ulikunitz/xz v0.5.9 github.com/ulikunitz/xz github.com/ulikunitz/xz/internal/hash github.com/ulikunitz/xz/internal/xlog @@ -562,7 +563,7 @@ github.com/ulikunitz/xz/lzma github.com/vbatts/tar-split/archive/tar github.com/vbatts/tar-split/tar/asm github.com/vbatts/tar-split/tar/storage -# github.com/vbauerster/mpb/v5 v5.3.0 +# github.com/vbauerster/mpb/v5 v5.4.0 github.com/vbauerster/mpb/v5 github.com/vbauerster/mpb/v5/cwriter github.com/vbauerster/mpb/v5/decor 
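Aside on the progress.go hunk above: syncWidth's inline closure is replaced by the named maxWidthDistributor, which collects one width from every channel in a column, takes the maximum, and sends that maximum back on every channel so all bars in the column render at the same width. A self-contained sketch of that synchronization pattern, assuming unbuffered channels and made-up widths for three bars:

    package main

    import (
        "fmt"
        "sync"
    )

    // maxWidthDistributor mirrors the refactored helper above: fan in
    // one width per channel, compute the maximum, then fan the maximum
    // back out so every participant uses the same width.
    func maxWidthDistributor(column []chan int) {
        var maxWidth int
        for _, ch := range column {
            if w := <-ch; w > maxWidth {
                maxWidth = w
            }
        }
        for _, ch := range column {
            ch <- maxWidth
        }
    }

    func main() {
        widths := []int{3, 11, 7} // hypothetical widths reported by three bars
        column := make([]chan int, len(widths))
        for i := range column {
            column[i] = make(chan int)
        }
        go maxWidthDistributor(column)

        var wg sync.WaitGroup
        for i, w := range widths {
            wg.Add(1)
            go func(i, w int) {
                defer wg.Done()
                column[i] <- w           // report this bar's width
                fmt.Println(<-column[i]) // all three goroutines print 11
            }(i, w)
        }
        wg.Wait()
    }
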
-- cgit v1.2.3-54-g00ecf