From 8090445762f4a8c3c26da7b30eaa0670c04ca3f4 Mon Sep 17 00:00:00 2001
From: Daniel J Walsh
Date: Fri, 17 Jun 2022 06:10:24 -0400
Subject: vendor in latest containers/image

Signed-off-by: Daniel J Walsh
---
 vendor/github.com/containers/image/v5/copy/blob.go | 170 ++++++++
 .../containers/image/v5/copy/compression.go | 320 +++++++++++++++
 vendor/github.com/containers/image/v5/copy/copy.go | 440 +++------------------
 .../github.com/containers/image/v5/copy/encrypt.go | 24 --
 .../containers/image/v5/copy/encryption.go | 129 ++++++
 .../containers/image/v5/copy/manifest.go | 68 ++--
 .../image/v5/directory/directory_transport.go | 5 +-
 .../image/v5/docker/archive/transport.go | 8 +-
 .../image/v5/docker/daemon/daemon_transport.go | 8 +-
 .../containers/image/v5/docker/docker_client.go | 5 +-
 .../containers/image/v5/docker/docker_image.go | 2 +-
 .../containers/image/v5/image/docker_list.go | 34 --
 .../containers/image/v5/image/docker_schema1.go | 248 ------------
 .../containers/image/v5/image/docker_schema2.go | 392 +-----------------
 .../containers/image/v5/image/manifest.go | 113 ------
 .../github.com/containers/image/v5/image/memory.go | 64 ---
 vendor/github.com/containers/image/v5/image/oci.go | 248 ------------
 .../containers/image/v5/image/oci_index.go | 34 --
 .../containers/image/v5/image/sourced.go | 73 +---
 .../containers/image/v5/image/unparsed.go | 82 +---
 .../image/v5/internal/image/docker_list.go | 34 ++
 .../image/v5/internal/image/docker_schema1.go | 257 ++++++++++++
 .../image/v5/internal/image/docker_schema2.go | 413 +++++++++++++++++++
 .../containers/image/v5/internal/image/manifest.go | 122 ++++++
 .../containers/image/v5/internal/image/memory.go | 64 +++
 .../containers/image/v5/internal/image/oci.go | 271 +++++++++++++
 .../image/v5/internal/image/oci_index.go | 34 ++
 .../containers/image/v5/internal/image/sourced.go | 134 +++++++
 .../containers/image/v5/internal/image/unparsed.go | 99 +++++
 .../image/v5/internal/manifest/errors.go | 32 ++
 .../containers/image/v5/manifest/common.go | 61 ++-
 .../containers/image/v5/manifest/docker_schema2.go | 8 +
 .../containers/image/v5/manifest/manifest.go | 5 +
 .../github.com/containers/image/v5/manifest/oci.go | 94 +++--
 .../image/v5/oci/archive/oci_transport.go | 8 +-
 .../image/v5/oci/layout/oci_transport.go | 8 +-
 .../image/v5/openshift/openshift_transport.go | 8 +-
 .../containers/image/v5/ostree/ostree_transport.go | 14 +-
 .../containers/image/v5/pkg/blobcache/blobcache.go | 8 +-
 .../containers/image/v5/sif/transport.go | 8 +-
 .../containers/image/v5/storage/storage_image.go | 8 +-
 .../image/v5/tarball/tarball_reference.go | 13 +-
 vendor/github.com/containers/ocicrypt/.travis.yml | 2 +-
 .../containers/ocicrypt/config/constructors.go | 2 +-
 .../ocicrypt/config/pkcs11config/config.go | 2 +-
 .../containers/ocicrypt/crypto/pkcs11/common.go | 2 +-
 vendor/github.com/containers/ocicrypt/go.mod | 2 +-
 vendor/github.com/containers/ocicrypt/go.sum | 4 +-
 vendor/github.com/imdario/mergo/README.md | 32 +-
 vendor/github.com/imdario/mergo/go.mod | 2 +-
 vendor/github.com/imdario/mergo/go.sum | 4 +-
 vendor/github.com/imdario/mergo/merge.go | 2 +-
 vendor/github.com/imdario/mergo/mergo.go | 4 +-
 vendor/modules.txt | 10 +-
 54 files changed, 2361 insertions(+), 1877 deletions(-)
 create mode 100644 vendor/github.com/containers/image/v5/copy/blob.go
 create mode 100644 vendor/github.com/containers/image/v5/copy/compression.go
 delete mode 100644 vendor/github.com/containers/image/v5/copy/encrypt.go
 create mode 100644 vendor/github.com/containers/image/v5/copy/encryption.go
 delete mode 100644 vendor/github.com/containers/image/v5/image/docker_list.go
 delete mode 100644 vendor/github.com/containers/image/v5/image/docker_schema1.go
 delete mode 100644 vendor/github.com/containers/image/v5/image/manifest.go
 delete mode 100644 vendor/github.com/containers/image/v5/image/memory.go
 delete mode 100644 vendor/github.com/containers/image/v5/image/oci.go
 delete mode 100644 vendor/github.com/containers/image/v5/image/oci_index.go
 create mode 100644 vendor/github.com/containers/image/v5/internal/image/docker_list.go
 create mode 100644 vendor/github.com/containers/image/v5/internal/image/docker_schema1.go
 create mode 100644 vendor/github.com/containers/image/v5/internal/image/docker_schema2.go
 create mode 100644 vendor/github.com/containers/image/v5/internal/image/manifest.go
 create mode 100644 vendor/github.com/containers/image/v5/internal/image/memory.go
 create mode 100644 vendor/github.com/containers/image/v5/internal/image/oci.go
 create mode 100644 vendor/github.com/containers/image/v5/internal/image/oci_index.go
 create mode 100644 vendor/github.com/containers/image/v5/internal/image/sourced.go
 create mode 100644 vendor/github.com/containers/image/v5/internal/image/unparsed.go
 create mode 100644 vendor/github.com/containers/image/v5/internal/manifest/errors.go
(limited to 'vendor')

diff --git a/vendor/github.com/containers/image/v5/copy/blob.go b/vendor/github.com/containers/image/v5/copy/blob.go
new file mode 100644
index 000000000..020e703e8
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/copy/blob.go
@@ -0,0 +1,170 @@
+package copy
+
+import (
+	"context"
+	"io"
+
+	"github.com/containers/image/v5/internal/private"
+	compressiontypes "github.com/containers/image/v5/pkg/compression/types"
+	"github.com/containers/image/v5/types"
+	"github.com/pkg/errors"
+	"github.com/sirupsen/logrus"
+)
+
+// copyBlobFromStream copies a blob with srcInfo (with known Digest and Annotations and possibly known Size) from srcReader to dest,
+// perhaps sending a copy to an io.Writer if getOriginalLayerCopyWriter != nil,
+// perhaps (de/re/)compressing it if canModifyBlob,
+// and returns a complete blobInfo of the copied blob.
+func (ic *imageCopier) copyBlobFromStream(ctx context.Context, srcReader io.Reader, srcInfo types.BlobInfo,
+	getOriginalLayerCopyWriter func(decompressor compressiontypes.DecompressorFunc) io.Writer,
+	isConfig bool, toEncrypt bool, bar *progressBar, layerIndex int, emptyLayer bool) (types.BlobInfo, error) {
+	// The copying happens through a pipeline of connected io.Readers;
+	// that pipeline is built by updating stream.
+	// === Input: srcReader
+	stream := sourceStream{
+		reader: srcReader,
+		info:   srcInfo,
+	}
+
+	// === Process input through digestingReader to validate against the expected digest.
+	// Be paranoid; in case PutBlob somehow managed to ignore an error from digestingReader,
+	// use a separate validation failure indicator.
+	// Note that for this check we don't use the stronger "validationSucceeded" indicator, because
+	// dest.PutBlob may detect that the layer already exists, in which case we don't
+	// read stream to the end, and validation does not happen.
+ digestingReader, err := newDigestingReader(stream.reader, srcInfo.Digest) + if err != nil { + return types.BlobInfo{}, errors.Wrapf(err, "preparing to verify blob %s", srcInfo.Digest) + } + stream.reader = digestingReader + + // === Update progress bars + stream.reader = bar.ProxyReader(stream.reader) + + // === Decrypt the stream, if required. + decryptionStep, err := ic.c.blobPipelineDecryptionStep(&stream, srcInfo) + if err != nil { + return types.BlobInfo{}, err + } + + // === Detect compression of the input stream. + // This requires us to “peek ahead” into the stream to read the initial part, which requires us to chain through another io.Reader returned by DetectCompression. + detectedCompression, err := blobPipelineDetectCompressionStep(&stream, srcInfo) + if err != nil { + return types.BlobInfo{}, err + } + + // === Send a copy of the original, uncompressed, stream, to a separate path if necessary. + var originalLayerReader io.Reader // DO NOT USE this other than to drain the input if no other consumer in the pipeline has done so. + if getOriginalLayerCopyWriter != nil { + stream.reader = io.TeeReader(stream.reader, getOriginalLayerCopyWriter(detectedCompression.decompressor)) + originalLayerReader = stream.reader + } + + // WARNING: If you are adding new reasons to change the blob, update also the OptimizeDestinationImageAlreadyExists + // short-circuit conditions + canModifyBlob := !isConfig && ic.cannotModifyManifestReason == "" + // === Deal with layer compression/decompression if necessary + compressionStep, err := ic.blobPipelineCompressionStep(&stream, canModifyBlob, srcInfo, detectedCompression) + if err != nil { + return types.BlobInfo{}, err + } + defer compressionStep.close() + + // === Encrypt the stream for valid mediatypes if ociEncryptConfig provided + if decryptionStep.decrypting && toEncrypt { + // If nothing else, we can only set uploadedInfo.CryptoOperation to a single value. + // Before relaxing this, see the original pull request’s review if there are other reasons to reject this. + return types.BlobInfo{}, errors.New("Unable to support both decryption and encryption in the same copy") + } + encryptionStep, err := ic.c.blobPipelineEncryptionStep(&stream, toEncrypt, srcInfo, decryptionStep) + if err != nil { + return types.BlobInfo{}, err + } + + // === Report progress using the ic.c.progress channel, if required. + if ic.c.progress != nil && ic.c.progressInterval > 0 { + progressReader := newProgressReader( + stream.reader, + ic.c.progress, + ic.c.progressInterval, + srcInfo, + ) + defer progressReader.reportDone() + stream.reader = progressReader + } + + // === Finally, send the layer stream to dest. 
+ options := private.PutBlobOptions{ + Cache: ic.c.blobInfoCache, + IsConfig: isConfig, + EmptyLayer: emptyLayer, + } + if !isConfig { + options.LayerIndex = &layerIndex + } + uploadedInfo, err := ic.c.dest.PutBlobWithOptions(ctx, &errorAnnotationReader{stream.reader}, stream.info, options) + if err != nil { + return types.BlobInfo{}, errors.Wrap(err, "writing blob") + } + + uploadedInfo.Annotations = stream.info.Annotations + + compressionStep.updateCompressionEdits(&uploadedInfo.CompressionOperation, &uploadedInfo.CompressionAlgorithm, &uploadedInfo.Annotations) + decryptionStep.updateCryptoOperation(&uploadedInfo.CryptoOperation) + if err := encryptionStep.updateCryptoOperationAndAnnotations(&uploadedInfo.CryptoOperation, &uploadedInfo.Annotations); err != nil { + return types.BlobInfo{}, err + } + + // This is fairly horrible: the writer from getOriginalLayerCopyWriter wants to consume + // all of the input (to compute DiffIDs), even if dest.PutBlob does not need it. + // So, read everything from originalLayerReader, which will cause the rest to be + // sent there if we are not already at EOF. + if getOriginalLayerCopyWriter != nil { + logrus.Debugf("Consuming rest of the original blob to satisfy getOriginalLayerCopyWriter") + _, err := io.Copy(io.Discard, originalLayerReader) + if err != nil { + return types.BlobInfo{}, errors.Wrapf(err, "reading input blob %s", srcInfo.Digest) + } + } + + if digestingReader.validationFailed { // Coverage: This should never happen. + return types.BlobInfo{}, errors.Errorf("Internal error writing blob %s, digest verification failed but was ignored", srcInfo.Digest) + } + if stream.info.Digest != "" && uploadedInfo.Digest != stream.info.Digest { + return types.BlobInfo{}, errors.Errorf("Internal error writing blob %s, blob with digest %s saved with digest %s", srcInfo.Digest, stream.info.Digest, uploadedInfo.Digest) + } + if digestingReader.validationSucceeded { + if err := compressionStep.recordValidatedDigestData(ic.c, uploadedInfo, srcInfo, encryptionStep, decryptionStep); err != nil { + return types.BlobInfo{}, err + } + } + + return uploadedInfo, nil +} + +// sourceStream encapsulates an input consumed by copyBlobFromStream, in progress of being built. +// This allows handles of individual aspects to build the copy pipeline without _too much_ +// specific cooperation by the caller. +// +// We are currently very far from a generalized plug-and-play API for building/consuming the pipeline +// without specific knowledge of various aspects in copyBlobFromStream; that may come one day. +type sourceStream struct { + reader io.Reader + info types.BlobInfo // corresponding to the data available in reader. +} + +// errorAnnotationReader wraps the io.Reader passed to PutBlob for annotating the error happened during read. +// These errors are reported as PutBlob errors, so we would otherwise misleadingly attribute them to the copy destination. 
+type errorAnnotationReader struct { + reader io.Reader +} + +// Read annotates the error happened during read +func (r errorAnnotationReader) Read(b []byte) (n int, err error) { + n, err = r.reader.Read(b) + if err != io.EOF { + return n, errors.Wrapf(err, "happened during read") + } + return n, err +} diff --git a/vendor/github.com/containers/image/v5/copy/compression.go b/vendor/github.com/containers/image/v5/copy/compression.go new file mode 100644 index 000000000..99305a039 --- /dev/null +++ b/vendor/github.com/containers/image/v5/copy/compression.go @@ -0,0 +1,320 @@ +package copy + +import ( + "io" + + internalblobinfocache "github.com/containers/image/v5/internal/blobinfocache" + "github.com/containers/image/v5/pkg/compression" + compressiontypes "github.com/containers/image/v5/pkg/compression/types" + "github.com/containers/image/v5/types" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +// bpDetectCompressionStepData contains data that the copy pipeline needs about the “detect compression” step. +type bpDetectCompressionStepData struct { + isCompressed bool + format compressiontypes.Algorithm // Valid if isCompressed + decompressor compressiontypes.DecompressorFunc // Valid if isCompressed + srcCompressorName string // Compressor name to possibly record in the blob info cache for the source blob. +} + +// blobPipelineDetectCompressionStep updates *stream to detect its current compression format. +// srcInfo is only used for error messages. +// Returns data for other steps. +func blobPipelineDetectCompressionStep(stream *sourceStream, srcInfo types.BlobInfo) (bpDetectCompressionStepData, error) { + // This requires us to “peek ahead” into the stream to read the initial part, which requires us to chain through another io.Reader returned by DetectCompression. + format, decompressor, reader, err := compression.DetectCompressionFormat(stream.reader) // We could skip this in some cases, but let's keep the code path uniform + if err != nil { + return bpDetectCompressionStepData{}, errors.Wrapf(err, "reading blob %s", srcInfo.Digest) + } + stream.reader = reader + + res := bpDetectCompressionStepData{ + isCompressed: decompressor != nil, + format: format, + decompressor: decompressor, + } + if res.isCompressed { + res.srcCompressorName = format.Name() + } else { + res.srcCompressorName = internalblobinfocache.Uncompressed + } + + if expectedFormat, known := expectedCompressionFormats[stream.info.MediaType]; known && res.isCompressed && format.Name() != expectedFormat.Name() { + logrus.Debugf("blob %s with type %s should be compressed with %s, but compressor appears to be %s", srcInfo.Digest.String(), srcInfo.MediaType, expectedFormat.Name(), format.Name()) + } + return res, nil +} + +// bpCompressionStepData contains data that the copy pipeline needs about the compression step. +type bpCompressionStepData struct { + operation types.LayerCompression // Operation to use for updating the blob metadata. + uploadedAlgorithm *compressiontypes.Algorithm // An algorithm parameter for the compressionOperation edits. + uploadedAnnotations map[string]string // Annotations that should be set on the uploaded blob. WARNING: This is only set after the srcStream.reader is fully consumed. + srcCompressorName string // Compressor name to record in the blob info cache for the source blob. + uploadedCompressorName string // Compressor name to record in the blob info cache for the uploaded blob. + closers []io.Closer // Objects to close after the upload is done, if any. 
+} + +// blobPipelineCompressionStep updates *stream to compress and/or decompress it. +// srcInfo is primarily used for error messages. +// Returns data for other steps; the caller should eventually call updateCompressionEdits and perhaps recordValidatedBlobData, +// and must eventually call close. +func (ic *imageCopier) blobPipelineCompressionStep(stream *sourceStream, canModifyBlob bool, srcInfo types.BlobInfo, + detected bpDetectCompressionStepData) (*bpCompressionStepData, error) { + // WARNING: If you are adding new reasons to change the blob, update also the OptimizeDestinationImageAlreadyExists + // short-circuit conditions + layerCompressionChangeSupported := ic.src.CanChangeLayerCompression(stream.info.MediaType) + if !layerCompressionChangeSupported { + logrus.Debugf("Compression change for blob %s (%q) not supported", srcInfo.Digest, stream.info.MediaType) + } + if canModifyBlob && layerCompressionChangeSupported { + for _, fn := range []func(*sourceStream, bpDetectCompressionStepData) (*bpCompressionStepData, error){ + ic.bpcPreserveEncrypted, + ic.bpcCompressUncompressed, + ic.bpcRecompressCompressed, + ic.bpcDecompressCompressed, + } { + res, err := fn(stream, detected) + if err != nil { + return nil, err + } + if res != nil { + return res, nil + } + } + } + return ic.bpcPreserveOriginal(stream, detected, layerCompressionChangeSupported), nil +} + +// bpcPreserveEncrypted checks if the input is encrypted, and returns a *bpCompressionStepData if so. +func (ic *imageCopier) bpcPreserveEncrypted(stream *sourceStream, _ bpDetectCompressionStepData) (*bpCompressionStepData, error) { + if isOciEncrypted(stream.info.MediaType) { + logrus.Debugf("Using original blob without modification for encrypted blob") + // PreserveOriginal due to any compression not being able to be done on an encrypted blob unless decrypted + return &bpCompressionStepData{ + operation: types.PreserveOriginal, + uploadedAlgorithm: nil, + srcCompressorName: internalblobinfocache.UnknownCompression, + uploadedCompressorName: internalblobinfocache.UnknownCompression, + }, nil + } + return nil, nil +} + +// bpcCompressUncompressed checks if we should be compressing an uncompressed input, and returns a *bpCompressionStepData if so. +func (ic *imageCopier) bpcCompressUncompressed(stream *sourceStream, detected bpDetectCompressionStepData) (*bpCompressionStepData, error) { + if ic.c.dest.DesiredLayerCompression() == types.Compress && !detected.isCompressed { + logrus.Debugf("Compressing blob on the fly") + var uploadedAlgorithm *compressiontypes.Algorithm + if ic.c.compressionFormat != nil { + uploadedAlgorithm = ic.c.compressionFormat + } else { + uploadedAlgorithm = defaultCompressionFormat + } + + reader, annotations := ic.c.compressedStream(stream.reader, *uploadedAlgorithm) + // Note: reader must be closed on all return paths. + stream.reader = reader + stream.info = types.BlobInfo{ // FIXME? Should we preserve more data in src.info? + Digest: "", + Size: -1, + } + return &bpCompressionStepData{ + operation: types.Compress, + uploadedAlgorithm: uploadedAlgorithm, + uploadedAnnotations: annotations, + srcCompressorName: detected.srcCompressorName, + uploadedCompressorName: uploadedAlgorithm.Name(), + closers: []io.Closer{reader}, + }, nil + } + return nil, nil +} + +// bpcRecompressCompressed checks if we should be recompressing a compressed input to another format, and returns a *bpCompressionStepData if so. 
+func (ic *imageCopier) bpcRecompressCompressed(stream *sourceStream, detected bpDetectCompressionStepData) (*bpCompressionStepData, error) { + if ic.c.dest.DesiredLayerCompression() == types.Compress && detected.isCompressed && + ic.c.compressionFormat != nil && ic.c.compressionFormat.Name() != detected.format.Name() { + // When the blob is compressed, but the desired format is different, it first needs to be decompressed and finally + // re-compressed using the desired format. + logrus.Debugf("Blob will be converted") + + decompressed, err := detected.decompressor(stream.reader) + if err != nil { + return nil, err + } + succeeded := false + defer func() { + if !succeeded { + decompressed.Close() + } + }() + + recompressed, annotations := ic.c.compressedStream(decompressed, *ic.c.compressionFormat) + // Note: recompressed must be closed on all return paths. + stream.reader = recompressed + stream.info = types.BlobInfo{ // FIXME? Should we preserve more data in src.info? + Digest: "", + Size: -1, + } + succeeded = true + return &bpCompressionStepData{ + operation: types.PreserveOriginal, + uploadedAlgorithm: ic.c.compressionFormat, + uploadedAnnotations: annotations, + srcCompressorName: detected.srcCompressorName, + uploadedCompressorName: ic.c.compressionFormat.Name(), + closers: []io.Closer{decompressed, recompressed}, + }, nil + } + return nil, nil +} + +// bpcDecompressCompressed checks if we should be decompressing a compressed input, and returns a *bpCompressionStepData if so. +func (ic *imageCopier) bpcDecompressCompressed(stream *sourceStream, detected bpDetectCompressionStepData) (*bpCompressionStepData, error) { + if ic.c.dest.DesiredLayerCompression() == types.Decompress && detected.isCompressed { + logrus.Debugf("Blob will be decompressed") + s, err := detected.decompressor(stream.reader) + if err != nil { + return nil, err + } + // Note: s must be closed on all return paths. + stream.reader = s + stream.info = types.BlobInfo{ // FIXME? Should we preserve more data in src.info? + Digest: "", + Size: -1, + } + return &bpCompressionStepData{ + operation: types.Decompress, + uploadedAlgorithm: nil, + srcCompressorName: detected.srcCompressorName, + uploadedCompressorName: internalblobinfocache.Uncompressed, + closers: []io.Closer{s}, + }, nil + } + return nil, nil +} + +// bpcPreserveOriginal returns a *bpCompressionStepData for not changing the original blob. +func (ic *imageCopier) bpcPreserveOriginal(stream *sourceStream, detected bpDetectCompressionStepData, + layerCompressionChangeSupported bool) *bpCompressionStepData { + logrus.Debugf("Using original blob without modification") + // Remember if the original blob was compressed, and if so how, so that if + // LayerInfosForCopy() returned something that differs from what was in the + // source's manifest, and UpdatedImage() needs to call UpdateLayerInfos(), + // it will be able to correctly derive the MediaType for the copied blob. + // + // But don’t touch blobs in objects where we can’t change compression, + // so that src.UpdatedImage() doesn’t fail; assume that for such blobs + // LayerInfosForCopy() should not be making any changes in the first place. 
+	var algorithm *compressiontypes.Algorithm
+	if layerCompressionChangeSupported && detected.isCompressed {
+		algorithm = &detected.format
+	} else {
+		algorithm = nil
+	}
+	return &bpCompressionStepData{
+		operation:              types.PreserveOriginal,
+		uploadedAlgorithm:      algorithm,
+		srcCompressorName:      detected.srcCompressorName,
+		uploadedCompressorName: detected.srcCompressorName,
+	}
+}
+
+// updateCompressionEdits sets *operation, *algorithm and updates *annotations, if necessary.
+func (d *bpCompressionStepData) updateCompressionEdits(operation *types.LayerCompression, algorithm **compressiontypes.Algorithm, annotations *map[string]string) {
+	*operation = d.operation
+	// If we can modify the layer's blob, set the desired algorithm for it to be set in the manifest.
+	*algorithm = d.uploadedAlgorithm
+	if *annotations == nil {
+		*annotations = map[string]string{}
+	}
+	for k, v := range d.uploadedAnnotations {
+		(*annotations)[k] = v
+	}
+}
+
+// recordValidatedDigestData updates c.blobInfoCache with data about the created uploadedInfo and the original srcInfo.
+// This must ONLY be called if all data has been validated by OUR code, and is not coming from third parties.
+func (d *bpCompressionStepData) recordValidatedDigestData(c *copier, uploadedInfo types.BlobInfo, srcInfo types.BlobInfo,
+	encryptionStep *bpEncryptionStepData, decryptionStep *bpDecryptionStepData) error {
+	// Don’t record any associations that involve encrypted data. This is a bit crude,
+	// some blob substitutions (replacing pulls of encrypted data with local reuse of known decryption outcomes)
+	// might be safe, but it’s not trivially obvious, so let’s be conservative for now.
+	// This crude approach also means we don’t need to record whether a blob is encrypted
+	// in the blob info cache (which would probably be necessary for any more complex logic),
+	// and the simplicity is attractive.
+	if !encryptionStep.encrypting && !decryptionStep.decrypting {
+		// If d.operation != types.PreserveOriginal, we now have two reliable digest values:
+		// srcinfo.Digest describes the pre-d.operation input, verified by digestingReader
+		// uploadedInfo.Digest describes the post-d.operation output, computed by PutBlob
+		// (because stream.info.Digest == "", this must have been computed afresh).
+		switch d.operation {
+		case types.PreserveOriginal:
+			break // Do nothing, we have only one digest and we might not have even verified it.
+		case types.Compress:
+			c.blobInfoCache.RecordDigestUncompressedPair(uploadedInfo.Digest, srcInfo.Digest)
+		case types.Decompress:
+			c.blobInfoCache.RecordDigestUncompressedPair(srcInfo.Digest, uploadedInfo.Digest)
+		default:
+			return errors.Errorf("Internal error: Unexpected d.operation value %#v", d.operation)
+		}
+	}
+	if d.uploadedCompressorName != "" && d.uploadedCompressorName != internalblobinfocache.UnknownCompression {
+		c.blobInfoCache.RecordDigestCompressorName(uploadedInfo.Digest, d.uploadedCompressorName)
+	}
+	if srcInfo.Digest != "" && d.srcCompressorName != "" && d.srcCompressorName != internalblobinfocache.UnknownCompression {
+		c.blobInfoCache.RecordDigestCompressorName(srcInfo.Digest, d.srcCompressorName)
+	}
+	return nil
+}
+
+// close closes objects that carry state throughout the compression/decompression operation.
+func (d *bpCompressionStepData) close() {
+	for _, c := range d.closers {
+		c.Close()
+	}
+}
+
+// doCompression reads all input from src and writes its compressed equivalent to dest.
+func doCompression(dest io.Writer, src io.Reader, metadata map[string]string, compressionFormat compressiontypes.Algorithm, compressionLevel *int) error { + compressor, err := compression.CompressStreamWithMetadata(dest, metadata, compressionFormat, compressionLevel) + if err != nil { + return err + } + + buf := make([]byte, compressionBufferSize) + + _, err = io.CopyBuffer(compressor, src, buf) // Sets err to nil, i.e. causes dest.Close() + if err != nil { + compressor.Close() + return err + } + + return compressor.Close() +} + +// compressGoroutine reads all input from src and writes its compressed equivalent to dest. +func (c *copier) compressGoroutine(dest *io.PipeWriter, src io.Reader, metadata map[string]string, compressionFormat compressiontypes.Algorithm) { + err := errors.New("Internal error: unexpected panic in compressGoroutine") + defer func() { // Note that this is not the same as {defer dest.CloseWithError(err)}; we need err to be evaluated lazily. + _ = dest.CloseWithError(err) // CloseWithError(nil) is equivalent to Close(), always returns nil + }() + + err = doCompression(dest, src, metadata, compressionFormat, c.compressionLevel) +} + +// compressedStream returns a stream the input reader compressed using format, and a metadata map. +// The caller must close the returned reader. +// AFTER the stream is consumed, metadata will be updated with annotations to use on the data. +func (c *copier) compressedStream(reader io.Reader, algorithm compressiontypes.Algorithm) (io.ReadCloser, map[string]string) { + pipeReader, pipeWriter := io.Pipe() + annotations := map[string]string{} + // If this fails while writing data, it will do pipeWriter.CloseWithError(); if it fails otherwise, + // e.g. because we have exited and due to pipeReader.Close() above further writing to the pipe has failed, + // we don’t care. 
+ go c.compressGoroutine(pipeWriter, reader, annotations, algorithm) // Closes pipeWriter + return pipeReader, annotations +} diff --git a/vendor/github.com/containers/image/v5/copy/copy.go b/vendor/github.com/containers/image/v5/copy/copy.go index 123c23e02..0df595237 100644 --- a/vendor/github.com/containers/image/v5/copy/copy.go +++ b/vendor/github.com/containers/image/v5/copy/copy.go @@ -12,8 +12,8 @@ import ( "time" "github.com/containers/image/v5/docker/reference" - "github.com/containers/image/v5/image" internalblobinfocache "github.com/containers/image/v5/internal/blobinfocache" + "github.com/containers/image/v5/internal/image" "github.com/containers/image/v5/internal/imagedestination" "github.com/containers/image/v5/internal/imagesource" "github.com/containers/image/v5/internal/pkg/platform" @@ -25,7 +25,6 @@ import ( "github.com/containers/image/v5/signature" "github.com/containers/image/v5/transports" "github.com/containers/image/v5/types" - "github.com/containers/ocicrypt" encconfig "github.com/containers/ocicrypt/config" digest "github.com/opencontainers/go-digest" imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" @@ -82,7 +81,7 @@ type copier struct { type imageCopier struct { c *copier manifestUpdates *types.ManifestUpdateOptions - src types.Image + src *image.SourcedImage diffIDsAreNeeded bool cannotModifyManifestReason string // The reason the manifest cannot be modified, or an empty string if it can canSubstituteBlobs bool @@ -349,13 +348,8 @@ func supportsMultipleImages(dest types.ImageDestination) bool { // compareImageDestinationManifestEqual compares the `src` and `dest` image manifests (reading the manifest from the // (possibly remote) destination). Returning true and the destination's manifest, type and digest if they compare equal. -func compareImageDestinationManifestEqual(ctx context.Context, options *Options, src types.Image, targetInstance *digest.Digest, dest types.ImageDestination) (bool, []byte, string, digest.Digest, error) { - srcManifest, _, err := src.Manifest(ctx) - if err != nil { - return false, nil, "", "", errors.Wrapf(err, "reading manifest from image") - } - - srcManifestDigest, err := manifest.Digest(srcManifest) +func compareImageDestinationManifestEqual(ctx context.Context, options *Options, src *image.SourcedImage, targetInstance *digest.Digest, dest types.ImageDestination) (bool, []byte, string, digest.Digest, error) { + srcManifestDigest, err := manifest.Digest(src.ManifestBlob) if err != nil { return false, nil, "", "", errors.Wrapf(err, "calculating manifest digest") } @@ -620,11 +614,7 @@ func (c *copier) copyOneImage(ctx context.Context, policyContext *signature.Poli if named := c.dest.Reference().DockerReference(); named != nil { if digested, ok := named.(reference.Digested); ok { destIsDigestedReference = true - sourceManifest, _, err := src.Manifest(ctx) - if err != nil { - return nil, "", "", errors.Wrapf(err, "reading manifest from source image") - } - matches, err := manifest.MatchesDigest(sourceManifest, digested.Digest()) + matches, err := manifest.MatchesDigest(src.ManifestBlob, digested.Digest()) if err != nil { return nil, "", "", errors.Wrapf(err, "computing digest of source image's manifest") } @@ -688,12 +678,14 @@ func (c *copier) copyOneImage(ctx context.Context, policyContext *signature.Poli cannotModifyManifestReason: cannotModifyManifestReason, ociEncryptLayers: options.OciEncryptLayers, } - // Ensure _this_ copy sees exactly the intended data when either processing a signed image or signing it. 
- // This may be too conservative, but for now, better safe than sorry, _especially_ on the SignBy path: - // The signature makes the content non-repudiable, so it very much matters that the signature is made over exactly what the user intended. - // We do intend the RecordDigestUncompressedPair calls to only work with reliable data, but at least there’s a risk - // that the compressed version coming from a third party may be designed to attack some other decompressor implementation, - // and we would reuse and sign it. + // Decide whether we can substitute blobs with semantic equivalents: + // - Don’t do that if we can’t modify the manifest at all + // - Ensure _this_ copy sees exactly the intended data when either processing a signed image or signing it. + // This may be too conservative, but for now, better safe than sorry, _especially_ on the SignBy path: + // The signature makes the content non-repudiable, so it very much matters that the signature is made over exactly what the user intended. + // We do intend the RecordDigestUncompressedPair calls to only work with reliable data, but at least there’s a risk + // that the compressed version coming from a third party may be designed to attack some other decompressor implementation, + // and we would reuse and sign it. ic.canSubstituteBlobs = ic.cannotModifyManifestReason == "" && options.SignBy == "" if err := ic.updateEmbeddedDockerReference(); err != nil { @@ -702,12 +694,23 @@ func (c *copier) copyOneImage(ctx context.Context, policyContext *signature.Poli destRequiresOciEncryption := (isEncrypted(src) && ic.c.ociDecryptConfig != nil) || options.OciEncryptLayers != nil - // We compute preferredManifestMIMEType only to show it in error messages. - // Without having to add this context in an error message, we would be happy enough to know only that no conversion is needed. - preferredManifestMIMEType, otherManifestMIMETypeCandidates, err := ic.determineManifestConversion(ctx, c.dest.SupportedManifestMIMETypes(), options.ForceManifestMIMEType, destRequiresOciEncryption) + manifestConversionPlan, err := determineManifestConversion(determineManifestConversionInputs{ + srcMIMEType: ic.src.ManifestMIMEType, + destSupportedManifestMIMETypes: ic.c.dest.SupportedManifestMIMETypes(), + forceManifestMIMEType: options.ForceManifestMIMEType, + requiresOCIEncryption: destRequiresOciEncryption, + cannotModifyManifestReason: ic.cannotModifyManifestReason, + }) if err != nil { return nil, "", "", err } + // We set up this part of ic.manifestUpdates quite early, not just around the + // code that calls copyUpdatedConfigAndManifest, so that other parts of the copy code + // (e.g. the UpdatedImageNeedsLayerDiffIDs check just below) can make decisions based + // on the expected destination format. + if manifestConversionPlan.preferredMIMETypeNeedsConversion { + ic.manifestUpdates.ManifestMIMEType = manifestConversionPlan.preferredMIMEType + } // If src.UpdatedImageNeedsLayerDiffIDs(ic.manifestUpdates) will be true, it needs to be true by the time we get here. ic.diffIDsAreNeeded = src.UpdatedImageNeedsLayerDiffIDs(*ic.manifestUpdates) @@ -742,22 +745,22 @@ func (c *copier) copyOneImage(ctx context.Context, policyContext *signature.Poli // So, try the preferred manifest MIME type with possibly-updated blob digests, media types, and sizes if // we're altering how they're compressed. 
If the process succeeds, fine… manifestBytes, retManifestDigest, err := ic.copyUpdatedConfigAndManifest(ctx, targetInstance) - retManifestType = preferredManifestMIMEType + retManifestType = manifestConversionPlan.preferredMIMEType if err != nil { - logrus.Debugf("Writing manifest using preferred type %s failed: %v", preferredManifestMIMEType, err) + logrus.Debugf("Writing manifest using preferred type %s failed: %v", manifestConversionPlan.preferredMIMEType, err) // … if it fails, and the failure is either because the manifest is rejected by the registry, or // because we failed to create a manifest of the specified type because the specific manifest type // doesn't support the type of compression we're trying to use (e.g. docker v2s2 and zstd), we may // have other options available that could still succeed. _, isManifestRejected := errors.Cause(err).(types.ManifestTypeRejectedError) _, isCompressionIncompatible := errors.Cause(err).(manifest.ManifestLayerCompressionIncompatibilityError) - if (!isManifestRejected && !isCompressionIncompatible) || len(otherManifestMIMETypeCandidates) == 0 { + if (!isManifestRejected && !isCompressionIncompatible) || len(manifestConversionPlan.otherMIMETypeCandidates) == 0 { // We don’t have other options. // In principle the code below would handle this as well, but the resulting error message is fairly ugly. // Don’t bother the user with MIME types if we have no choice. return nil, "", "", err } - // If the original MIME type is acceptable, determineManifestConversion always uses it as preferredManifestMIMEType. + // If the original MIME type is acceptable, determineManifestConversion always uses it as manifestConversionPlan.preferredMIMEType. // So if we are here, we will definitely be trying to convert the manifest. // With ic.cannotModifyManifestReason != "", that would just be a string of repeated failures for the same reason, // so let’s bail out early and with a better error message. @@ -766,8 +769,8 @@ func (c *copier) copyOneImage(ctx context.Context, policyContext *signature.Poli } // errs is a list of errors when trying various manifest types. Also serves as an "upload succeeded" flag when set to nil. - errs := []string{fmt.Sprintf("%s(%v)", preferredManifestMIMEType, err)} - for _, manifestMIMEType := range otherManifestMIMETypeCandidates { + errs := []string{fmt.Sprintf("%s(%v)", manifestConversionPlan.preferredMIMEType, err)} + for _, manifestMIMEType := range manifestConversionPlan.otherMIMETypeCandidates { logrus.Debugf("Trying to use manifest type %s…", manifestMIMEType) ic.manifestUpdates.ManifestMIMEType = manifestMIMEType attemptedManifest, attemptedManifestDigest, err := ic.copyUpdatedConfigAndManifest(ctx, targetInstance) @@ -908,11 +911,7 @@ func (ic *imageCopier) copyLayers(ctx context.Context) error { // The manifest is used to extract the information whether a given // layer is empty. - manifestBlob, manifestType, err := ic.src.Manifest(ctx) - if err != nil { - return err - } - man, err := manifest.FromBlob(manifestBlob, manifestType) + man, err := manifest.FromBlob(ic.src.ManifestBlob, ic.src.ManifestMIMEType) if err != nil { return err } @@ -1022,7 +1021,7 @@ func layerDigestsDiffer(a, b []types.BlobInfo) bool { // stores the resulting config and manifest to the destination, and returns the stored manifest // and its digest. 
func (ic *imageCopier) copyUpdatedConfigAndManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, digest.Digest, error) { - pendingImage := ic.src + var pendingImage types.Image = ic.src if !ic.noPendingManifestUpdates() { if ic.cannotModifyManifestReason != "" { return nil, "", errors.Errorf("Internal error: copy needs an updated manifest but that was known to be forbidden: %q", ic.cannotModifyManifestReason) @@ -1047,7 +1046,7 @@ func (ic *imageCopier) copyUpdatedConfigAndManifest(ctx context.Context, instanc return nil, "", errors.Wrap(err, "reading manifest") } - if err := ic.c.copyConfig(ctx, pendingImage); err != nil { + if err := ic.copyConfig(ctx, pendingImage); err != nil { return nil, "", err } @@ -1067,19 +1066,19 @@ func (ic *imageCopier) copyUpdatedConfigAndManifest(ctx context.Context, instanc } // copyConfig copies config.json, if any, from src to dest. -func (c *copier) copyConfig(ctx context.Context, src types.Image) error { +func (ic *imageCopier) copyConfig(ctx context.Context, src types.Image) error { srcInfo := src.ConfigInfo() if srcInfo.Digest != "" { - if err := c.concurrentBlobCopiesSemaphore.Acquire(ctx, 1); err != nil { + if err := ic.c.concurrentBlobCopiesSemaphore.Acquire(ctx, 1); err != nil { // This can only fail with ctx.Err(), so no need to blame acquiring the semaphore. return fmt.Errorf("copying config: %w", err) } - defer c.concurrentBlobCopiesSemaphore.Release(1) + defer ic.c.concurrentBlobCopiesSemaphore.Release(1) destInfo, err := func() (types.BlobInfo, error) { // A scope for defer - progressPool := c.newProgressPool() + progressPool := ic.c.newProgressPool() defer progressPool.Wait() - bar := c.createProgressBar(progressPool, false, srcInfo, "config", "done") + bar := ic.c.createProgressBar(progressPool, false, srcInfo, "config", "done") defer bar.Abort(false) configBlob, err := src.ConfigBlob(ctx) @@ -1087,7 +1086,7 @@ func (c *copier) copyConfig(ctx context.Context, src types.Image) error { return types.BlobInfo{}, errors.Wrapf(err, "reading config blob %s", srcInfo.Digest) } - destInfo, err := c.copyBlobFromStream(ctx, bytes.NewReader(configBlob), srcInfo, nil, false, true, false, bar, -1, false) + destInfo, err := ic.copyBlobFromStream(ctx, bytes.NewReader(configBlob), srcInfo, nil, true, false, bar, -1, false) if err != nil { return types.BlobInfo{}, err } @@ -1146,6 +1145,10 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, to // Don’t read the layer from the source if we already have the blob, and optimizations are acceptable. if canAvoidProcessingCompleteLayer { + canChangeLayerCompression := ic.src.CanChangeLayerCompression(srcInfo.MediaType) + logrus.Debugf("Checking if we can reuse blob %s: general substitution = %v, compression for MIME type %q = %v", + srcInfo.Digest, ic.canSubstituteBlobs, srcInfo.MediaType, canChangeLayerCompression) + canSubstitute := ic.canSubstituteBlobs && ic.src.CanChangeLayerCompression(srcInfo.MediaType) // TODO: at this point we don't know whether or not a blob we end up reusing is compressed using an algorithm // that is acceptable for use on layers in the manifest that we'll be writing later, so if we end up reusing // a blob that's compressed with e.g. zstd, but we're only allowed to write a v2s2 manifest, this will cause @@ -1154,7 +1157,7 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, to // the ImageDestination interface lets us pass in. 
reused, blobInfo, err := ic.c.dest.TryReusingBlobWithOptions(ctx, srcInfo, private.TryReusingBlobOptions{ Cache: ic.c.blobInfoCache, - CanSubstitute: ic.canSubstituteBlobs, + CanSubstitute: canSubstitute, EmptyLayer: emptyLayer, LayerIndex: &layerIndex, SrcRef: srcRef, @@ -1303,7 +1306,7 @@ func (ic *imageCopier) copyLayerFromStream(ctx context.Context, srcStream io.Rea } } - blobInfo, err := ic.c.copyBlobFromStream(ctx, srcStream, srcInfo, getDiffIDRecorder, ic.cannotModifyManifestReason == "", false, toEncrypt, bar, layerIndex, emptyLayer) // Sets err to nil on success + blobInfo, err := ic.copyBlobFromStream(ctx, srcStream, srcInfo, getDiffIDRecorder, false, toEncrypt, bar, layerIndex, emptyLayer) // Sets err to nil on success return blobInfo, diffIDChan, err // We need the defer … pipeWriter.CloseWithError() to happen HERE so that the caller can block on reading from diffIDChan } @@ -1333,350 +1336,3 @@ func computeDiffID(stream io.Reader, decompressor compressiontypes.DecompressorF return digest.Canonical.FromReader(stream) } - -// errorAnnotationReader wraps the io.Reader passed to PutBlob for annotating the error happened during read. -// These errors are reported as PutBlob errors, so we would otherwise misleadingly attribute them to the copy destination. -type errorAnnotationReader struct { - reader io.Reader -} - -// Read annotates the error happened during read -func (r errorAnnotationReader) Read(b []byte) (n int, err error) { - n, err = r.reader.Read(b) - if err != io.EOF { - return n, errors.Wrapf(err, "happened during read") - } - return n, err -} - -// copyBlobFromStream copies a blob with srcInfo (with known Digest and Annotations and possibly known Size) from srcStream to dest, -// perhaps sending a copy to an io.Writer if getOriginalLayerCopyWriter != nil, -// perhaps (de/re/)compressing it if canModifyBlob, -// and returns a complete blobInfo of the copied blob. -func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, srcInfo types.BlobInfo, - getOriginalLayerCopyWriter func(decompressor compressiontypes.DecompressorFunc) io.Writer, - canModifyBlob bool, isConfig bool, toEncrypt bool, bar *progressBar, layerIndex int, emptyLayer bool) (types.BlobInfo, error) { - if isConfig { // This is guaranteed by the caller, but set it here to be explicit. - canModifyBlob = false - } - - // The copying happens through a pipeline of connected io.Readers. - // === Input: srcStream - - // === Process input through digestingReader to validate against the expected digest. - // Be paranoid; in case PutBlob somehow managed to ignore an error from digestingReader, - // use a separate validation failure indicator. - // Note that for this check we don't use the stronger "validationSucceeded" indicator, because - // dest.PutBlob may detect that the layer already exists, in which case we don't - // read stream to the end, and validation does not happen. - digestingReader, err := newDigestingReader(srcStream, srcInfo.Digest) - if err != nil { - return types.BlobInfo{}, errors.Wrapf(err, "preparing to verify blob %s", srcInfo.Digest) - } - var destStream io.Reader = digestingReader - - // === Update progress bars - destStream = bar.ProxyReader(destStream) - - // === Decrypt the stream, if required. 
- var decrypted bool - if isOciEncrypted(srcInfo.MediaType) && c.ociDecryptConfig != nil { - newDesc := imgspecv1.Descriptor{ - Annotations: srcInfo.Annotations, - } - - var d digest.Digest - destStream, d, err = ocicrypt.DecryptLayer(c.ociDecryptConfig, destStream, newDesc, false) - if err != nil { - return types.BlobInfo{}, errors.Wrapf(err, "decrypting layer %s", srcInfo.Digest) - } - - srcInfo.Digest = d - srcInfo.Size = -1 - for k := range srcInfo.Annotations { - if strings.HasPrefix(k, "org.opencontainers.image.enc") { - delete(srcInfo.Annotations, k) - } - } - decrypted = true - } - - // === Detect compression of the input stream. - // This requires us to “peek ahead” into the stream to read the initial part, which requires us to chain through another io.Reader returned by DetectCompression. - compressionFormat, decompressor, destStream, err := compression.DetectCompressionFormat(destStream) // We could skip this in some cases, but let's keep the code path uniform - if err != nil { - return types.BlobInfo{}, errors.Wrapf(err, "reading blob %s", srcInfo.Digest) - } - isCompressed := decompressor != nil - if expectedCompressionFormat, known := expectedCompressionFormats[srcInfo.MediaType]; known && isCompressed && compressionFormat.Name() != expectedCompressionFormat.Name() { - logrus.Debugf("blob %s with type %s should be compressed with %s, but compressor appears to be %s", srcInfo.Digest.String(), srcInfo.MediaType, expectedCompressionFormat.Name(), compressionFormat.Name()) - } - - // === Send a copy of the original, uncompressed, stream, to a separate path if necessary. - var originalLayerReader io.Reader // DO NOT USE this other than to drain the input if no other consumer in the pipeline has done so. - if getOriginalLayerCopyWriter != nil { - destStream = io.TeeReader(destStream, getOriginalLayerCopyWriter(decompressor)) - originalLayerReader = destStream - } - - compressionMetadata := map[string]string{} - // === Deal with layer compression/decompression if necessary - // WARNING: If you are adding new reasons to change the blob, update also the OptimizeDestinationImageAlreadyExists - // short-circuit conditions - var inputInfo types.BlobInfo - var compressionOperation types.LayerCompression - var uploadCompressionFormat *compressiontypes.Algorithm - srcCompressorName := internalblobinfocache.Uncompressed - if isCompressed { - srcCompressorName = compressionFormat.Name() - } - var uploadCompressorName string - if canModifyBlob && isOciEncrypted(srcInfo.MediaType) { - // PreserveOriginal due to any compression not being able to be done on an encrypted blob unless decrypted - logrus.Debugf("Using original blob without modification for encrypted blob") - compressionOperation = types.PreserveOriginal - inputInfo = srcInfo - srcCompressorName = internalblobinfocache.UnknownCompression - uploadCompressionFormat = nil - uploadCompressorName = internalblobinfocache.UnknownCompression - } else if canModifyBlob && c.dest.DesiredLayerCompression() == types.Compress && !isCompressed { - logrus.Debugf("Compressing blob on the fly") - compressionOperation = types.Compress - pipeReader, pipeWriter := io.Pipe() - defer pipeReader.Close() - - if c.compressionFormat != nil { - uploadCompressionFormat = c.compressionFormat - } else { - uploadCompressionFormat = defaultCompressionFormat - } - // If this fails while writing data, it will do pipeWriter.CloseWithError(); if it fails otherwise, - // e.g. 
because we have exited and due to pipeReader.Close() above further writing to the pipe has failed, - // we don’t care. - go c.compressGoroutine(pipeWriter, destStream, compressionMetadata, *uploadCompressionFormat) // Closes pipeWriter - destStream = pipeReader - inputInfo.Digest = "" - inputInfo.Size = -1 - uploadCompressorName = uploadCompressionFormat.Name() - } else if canModifyBlob && c.dest.DesiredLayerCompression() == types.Compress && isCompressed && - c.compressionFormat != nil && c.compressionFormat.Name() != compressionFormat.Name() { - // When the blob is compressed, but the desired format is different, it first needs to be decompressed and finally - // re-compressed using the desired format. - logrus.Debugf("Blob will be converted") - - compressionOperation = types.PreserveOriginal - s, err := decompressor(destStream) - if err != nil { - return types.BlobInfo{}, err - } - defer s.Close() - - pipeReader, pipeWriter := io.Pipe() - defer pipeReader.Close() - - uploadCompressionFormat = c.compressionFormat - go c.compressGoroutine(pipeWriter, s, compressionMetadata, *uploadCompressionFormat) // Closes pipeWriter - - destStream = pipeReader - inputInfo.Digest = "" - inputInfo.Size = -1 - uploadCompressorName = uploadCompressionFormat.Name() - } else if canModifyBlob && c.dest.DesiredLayerCompression() == types.Decompress && isCompressed { - logrus.Debugf("Blob will be decompressed") - compressionOperation = types.Decompress - s, err := decompressor(destStream) - if err != nil { - return types.BlobInfo{}, err - } - defer s.Close() - destStream = s - inputInfo.Digest = "" - inputInfo.Size = -1 - uploadCompressionFormat = nil - uploadCompressorName = internalblobinfocache.Uncompressed - } else { - // PreserveOriginal might also need to recompress the original blob if the desired compression format is different. - logrus.Debugf("Using original blob without modification") - compressionOperation = types.PreserveOriginal - inputInfo = srcInfo - // Remember if the original blob was compressed, and if so how, so that if - // LayerInfosForCopy() returned something that differs from what was in the - // source's manifest, and UpdatedImage() needs to call UpdateLayerInfos(), - // it will be able to correctly derive the MediaType for the copied blob. - if isCompressed { - uploadCompressionFormat = &compressionFormat - } else { - uploadCompressionFormat = nil - } - uploadCompressorName = srcCompressorName - } - - // === Encrypt the stream for valid mediatypes if ociEncryptConfig provided - var ( - encrypted bool - finalizer ocicrypt.EncryptLayerFinalizer - ) - if toEncrypt { - if decrypted { - return types.BlobInfo{}, errors.New("Unable to support both decryption and encryption in the same copy") - } - - if !isOciEncrypted(srcInfo.MediaType) && c.ociEncryptConfig != nil { - var annotations map[string]string - if !decrypted { - annotations = srcInfo.Annotations - } - desc := imgspecv1.Descriptor{ - MediaType: srcInfo.MediaType, - Digest: srcInfo.Digest, - Size: srcInfo.Size, - Annotations: annotations, - } - - s, fin, err := ocicrypt.EncryptLayer(c.ociEncryptConfig, destStream, desc) - if err != nil { - return types.BlobInfo{}, errors.Wrapf(err, "encrypting blob %s", srcInfo.Digest) - } - - destStream = s - finalizer = fin - inputInfo.Digest = "" - inputInfo.Size = -1 - encrypted = true - } - } - - // === Report progress using the c.progress channel, if required. 
- if c.progress != nil && c.progressInterval > 0 { - progressReader := newProgressReader( - destStream, - c.progress, - c.progressInterval, - srcInfo, - ) - defer progressReader.reportDone() - destStream = progressReader - } - - // === Finally, send the layer stream to dest. - options := private.PutBlobOptions{ - Cache: c.blobInfoCache, - IsConfig: isConfig, - EmptyLayer: emptyLayer, - } - if !isConfig { - options.LayerIndex = &layerIndex - } - uploadedInfo, err := c.dest.PutBlobWithOptions(ctx, &errorAnnotationReader{destStream}, inputInfo, options) - if err != nil { - return types.BlobInfo{}, errors.Wrap(err, "writing blob") - } - - uploadedInfo.Annotations = srcInfo.Annotations - - uploadedInfo.CompressionOperation = compressionOperation - // If we can modify the layer's blob, set the desired algorithm for it to be set in the manifest. - uploadedInfo.CompressionAlgorithm = uploadCompressionFormat - if decrypted { - uploadedInfo.CryptoOperation = types.Decrypt - } else if encrypted { - encryptAnnotations, err := finalizer() - if err != nil { - return types.BlobInfo{}, errors.Wrap(err, "Unable to finalize encryption") - } - uploadedInfo.CryptoOperation = types.Encrypt - if uploadedInfo.Annotations == nil { - uploadedInfo.Annotations = map[string]string{} - } - for k, v := range encryptAnnotations { - uploadedInfo.Annotations[k] = v - } - } - - // This is fairly horrible: the writer from getOriginalLayerCopyWriter wants to consume - // all of the input (to compute DiffIDs), even if dest.PutBlob does not need it. - // So, read everything from originalLayerReader, which will cause the rest to be - // sent there if we are not already at EOF. - if getOriginalLayerCopyWriter != nil { - logrus.Debugf("Consuming rest of the original blob to satisfy getOriginalLayerCopyWriter") - _, err := io.Copy(io.Discard, originalLayerReader) - if err != nil { - return types.BlobInfo{}, errors.Wrapf(err, "reading input blob %s", srcInfo.Digest) - } - } - - if digestingReader.validationFailed { // Coverage: This should never happen. - return types.BlobInfo{}, errors.Errorf("Internal error writing blob %s, digest verification failed but was ignored", srcInfo.Digest) - } - if inputInfo.Digest != "" && uploadedInfo.Digest != inputInfo.Digest { - return types.BlobInfo{}, errors.Errorf("Internal error writing blob %s, blob with digest %s saved with digest %s", srcInfo.Digest, inputInfo.Digest, uploadedInfo.Digest) - } - if digestingReader.validationSucceeded { - // Don’t record any associations that involve encrypted data. This is a bit crude, - // some blob substitutions (replacing pulls of encrypted data with local reuse of known decryption outcomes) - // might be safe, but it’s not trivially obvious, so let’s be conservative for now. - // This crude approach also means we don’t need to record whether a blob is encrypted - // in the blob info cache (which would probably be necessary for any more complex logic), - // and the simplicity is attractive. - if !encrypted && !decrypted { - // If compressionOperation != types.PreserveOriginal, we now have two reliable digest values: - // srcinfo.Digest describes the pre-compressionOperation input, verified by digestingReader - // uploadedInfo.Digest describes the post-compressionOperation output, computed by PutBlob - // (because inputInfo.Digest == "", this must have been computed afresh). - switch compressionOperation { - case types.PreserveOriginal: - break // Do nothing, we have only one digest and we might not have even verified it. 
- case types.Compress: - c.blobInfoCache.RecordDigestUncompressedPair(uploadedInfo.Digest, srcInfo.Digest) - case types.Decompress: - c.blobInfoCache.RecordDigestUncompressedPair(srcInfo.Digest, uploadedInfo.Digest) - default: - return types.BlobInfo{}, errors.Errorf("Internal error: Unexpected compressionOperation value %#v", compressionOperation) - } - } - if uploadCompressorName != "" && uploadCompressorName != internalblobinfocache.UnknownCompression { - c.blobInfoCache.RecordDigestCompressorName(uploadedInfo.Digest, uploadCompressorName) - } - if srcInfo.Digest != "" && srcCompressorName != "" && srcCompressorName != internalblobinfocache.UnknownCompression { - c.blobInfoCache.RecordDigestCompressorName(srcInfo.Digest, srcCompressorName) - } - } - - // Copy all the metadata generated by the compressor into the annotations. - if uploadedInfo.Annotations == nil { - uploadedInfo.Annotations = map[string]string{} - } - for k, v := range compressionMetadata { - uploadedInfo.Annotations[k] = v - } - - return uploadedInfo, nil -} - -// doCompression reads all input from src and writes its compressed equivalent to dest. -func doCompression(dest io.Writer, src io.Reader, metadata map[string]string, compressionFormat compressiontypes.Algorithm, compressionLevel *int) error { - compressor, err := compression.CompressStreamWithMetadata(dest, metadata, compressionFormat, compressionLevel) - if err != nil { - return err - } - - buf := make([]byte, compressionBufferSize) - - _, err = io.CopyBuffer(compressor, src, buf) // Sets err to nil, i.e. causes dest.Close() - if err != nil { - compressor.Close() - return err - } - - return compressor.Close() -} - -// compressGoroutine reads all input from src and writes its compressed equivalent to dest. -func (c *copier) compressGoroutine(dest *io.PipeWriter, src io.Reader, metadata map[string]string, compressionFormat compressiontypes.Algorithm) { - err := errors.New("Internal error: unexpected panic in compressGoroutine") - defer func() { // Note that this is not the same as {defer dest.CloseWithError(err)}; we need err to be evaluated lazily. - _ = dest.CloseWithError(err) // CloseWithError(nil) is equivalent to Close(), always returns nil - }() - - err = doCompression(dest, src, metadata, compressionFormat, c.compressionLevel) -} diff --git a/vendor/github.com/containers/image/v5/copy/encrypt.go b/vendor/github.com/containers/image/v5/copy/encrypt.go deleted file mode 100644 index a18d6f151..000000000 --- a/vendor/github.com/containers/image/v5/copy/encrypt.go +++ /dev/null @@ -1,24 +0,0 @@ -package copy - -import ( - "strings" - - "github.com/containers/image/v5/types" -) - -// isOciEncrypted returns a bool indicating if a mediatype is encrypted -// This function will be moved to be part of OCI spec when adopted. 
-func isOciEncrypted(mediatype string) bool {
- return strings.HasSuffix(mediatype, "+encrypted")
-}
-
-// isEncrypted checks if an image is encrypted
-func isEncrypted(i types.Image) bool {
- layers := i.LayerInfos()
- for _, l := range layers {
- if isOciEncrypted(l.MediaType) {
- return true
- }
- }
- return false
-}
diff --git a/vendor/github.com/containers/image/v5/copy/encryption.go b/vendor/github.com/containers/image/v5/copy/encryption.go
new file mode 100644
index 000000000..ae0576da4
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/copy/encryption.go
@@ -0,0 +1,129 @@
+package copy
+
+import (
+ "strings"
+
+ "github.com/containers/image/v5/types"
+ "github.com/containers/ocicrypt"
+ imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
+ "github.com/pkg/errors"
+)
+
+// isOciEncrypted returns a bool indicating if a mediatype is encrypted
+// This function will be moved to be part of OCI spec when adopted.
+func isOciEncrypted(mediatype string) bool {
+ return strings.HasSuffix(mediatype, "+encrypted")
+}
+
+// isEncrypted checks if an image is encrypted
+func isEncrypted(i types.Image) bool {
+ layers := i.LayerInfos()
+ for _, l := range layers {
+ if isOciEncrypted(l.MediaType) {
+ return true
+ }
+ }
+ return false
+}
+
+// bpDecryptionStepData contains data that the copy pipeline needs about the decryption step.
+type bpDecryptionStepData struct {
+ decrypting bool // We are actually decrypting the stream
+}
+
+// blobPipelineDecryptionStep updates *stream to decrypt it, if necessary.
+// srcInfo is only used for error messages.
+// Returns data for other steps; the caller should eventually use updateCryptoOperation.
+func (c *copier) blobPipelineDecryptionStep(stream *sourceStream, srcInfo types.BlobInfo) (*bpDecryptionStepData, error) {
+ if isOciEncrypted(stream.info.MediaType) && c.ociDecryptConfig != nil {
+ desc := imgspecv1.Descriptor{
+ Annotations: stream.info.Annotations,
+ }
+ reader, decryptedDigest, err := ocicrypt.DecryptLayer(c.ociDecryptConfig, stream.reader, desc, false)
+ if err != nil {
+ return nil, errors.Wrapf(err, "decrypting layer %s", srcInfo.Digest)
+ }
+
+ stream.reader = reader
+ stream.info.Digest = decryptedDigest
+ stream.info.Size = -1
+ for k := range stream.info.Annotations {
+ if strings.HasPrefix(k, "org.opencontainers.image.enc") {
+ delete(stream.info.Annotations, k)
+ }
+ }
+ return &bpDecryptionStepData{
+ decrypting: true,
+ }, nil
+ }
+ return &bpDecryptionStepData{
+ decrypting: false,
+ }, nil
+}
+
+// updateCryptoOperation sets *operation, if necessary.
+func (d *bpDecryptionStepData) updateCryptoOperation(operation *types.LayerCrypto) {
+ if d.decrypting {
+ *operation = types.Decrypt
+ }
+}
+
+// bpEncryptionStepData contains data that the copy pipeline needs about the encryption step.
+type bpEncryptionStepData struct {
+ encrypting bool // We are actually encrypting the stream
+ finalizer ocicrypt.EncryptLayerFinalizer
+}
+
+// blobPipelineEncryptionStep updates *stream to encrypt it, if required by toEncrypt.
+// srcInfo is primarily used for error messages.
+// Returns data for other steps; the caller should eventually call updateCryptoOperationAndAnnotations.
+func (c *copier) blobPipelineEncryptionStep(stream *sourceStream, toEncrypt bool, srcInfo types.BlobInfo, + decryptionStep *bpDecryptionStepData) (*bpEncryptionStepData, error) { + if toEncrypt && !isOciEncrypted(srcInfo.MediaType) && c.ociEncryptConfig != nil { + var annotations map[string]string + if !decryptionStep.decrypting { + annotations = srcInfo.Annotations + } + desc := imgspecv1.Descriptor{ + MediaType: srcInfo.MediaType, + Digest: srcInfo.Digest, + Size: srcInfo.Size, + Annotations: annotations, + } + reader, finalizer, err := ocicrypt.EncryptLayer(c.ociEncryptConfig, stream.reader, desc) + if err != nil { + return nil, errors.Wrapf(err, "encrypting blob %s", srcInfo.Digest) + } + + stream.reader = reader + stream.info.Digest = "" + stream.info.Size = -1 + return &bpEncryptionStepData{ + encrypting: true, + finalizer: finalizer, + }, nil + } + return &bpEncryptionStepData{ + encrypting: false, + }, nil +} + +// updateCryptoOperationAndAnnotations sets *operation and updates *annotations, if necessary. +func (d *bpEncryptionStepData) updateCryptoOperationAndAnnotations(operation *types.LayerCrypto, annotations *map[string]string) error { + if !d.encrypting { + return nil + } + + encryptAnnotations, err := d.finalizer() + if err != nil { + return errors.Wrap(err, "Unable to finalize encryption") + } + *operation = types.Encrypt + if *annotations == nil { + *annotations = map[string]string{} + } + for k, v := range encryptAnnotations { + (*annotations)[k] = v + } + return nil +} diff --git a/vendor/github.com/containers/image/v5/copy/manifest.go b/vendor/github.com/containers/image/v5/copy/manifest.go index 86ec8863a..b65459f8c 100644 --- a/vendor/github.com/containers/image/v5/copy/manifest.go +++ b/vendor/github.com/containers/image/v5/copy/manifest.go @@ -38,31 +38,50 @@ func (os *orderedSet) append(s string) { } } -// determineManifestConversion updates ic.manifestUpdates to convert manifest to a supported MIME type, if necessary and ic.canModifyManifest. -// Note that the conversion will only happen later, through ic.src.UpdatedImage -// Returns the preferred manifest MIME type (whether we are converting to it or using it unmodified), -// and a list of other possible alternatives, in order. -func (ic *imageCopier) determineManifestConversion(ctx context.Context, destSupportedManifestMIMETypes []string, forceManifestMIMEType string, requiresOciEncryption bool) (string, []string, error) { - _, srcType, err := ic.src.Manifest(ctx) - if err != nil { // This should have been cached?! - return "", nil, errors.Wrap(err, "reading manifest") - } +// determineManifestConversionInputs contains the inputs for determineManifestConversion. +type determineManifestConversionInputs struct { + srcMIMEType string // MIME type of the input manifest + + destSupportedManifestMIMETypes []string // MIME types supported by the destination, per types.ImageDestination.SupportedManifestMIMETypes() + + forceManifestMIMEType string // User’s choice of forced manifest MIME type + requiresOCIEncryption bool // Restrict to manifest formats that can support OCI encryption + cannotModifyManifestReason string // The reason the manifest cannot be modified, or an empty string if it can +} + +// manifestConversionPlan contains the decisions made by determineManifestConversion. +type manifestConversionPlan struct { + // The preferred manifest MIME type (whether we are converting to it or using it unmodified). 
+ // We compute this only to show it in error messages; without having to add this context + // in an error message, we would be happy enough to know only that no conversion is needed. + preferredMIMEType string + preferredMIMETypeNeedsConversion bool // True if using preferredMIMEType requires a conversion step. + otherMIMETypeCandidates []string // Other possible alternatives, in order +} + +// determineManifestConversion returns a plan for what formats, and possibly conversions, to use based on in. +func determineManifestConversion(in determineManifestConversionInputs) (manifestConversionPlan, error) { + srcType := in.srcMIMEType normalizedSrcType := manifest.NormalizedMIMEType(srcType) if srcType != normalizedSrcType { logrus.Debugf("Source manifest MIME type %s, treating it as %s", srcType, normalizedSrcType) srcType = normalizedSrcType } - if forceManifestMIMEType != "" { - destSupportedManifestMIMETypes = []string{forceManifestMIMEType} + destSupportedManifestMIMETypes := in.destSupportedManifestMIMETypes + if in.forceManifestMIMEType != "" { + destSupportedManifestMIMETypes = []string{in.forceManifestMIMEType} } - if len(destSupportedManifestMIMETypes) == 0 && (!requiresOciEncryption || manifest.MIMETypeSupportsEncryption(srcType)) { - return srcType, []string{}, nil // Anything goes; just use the original as is, do not try any conversions. + if len(destSupportedManifestMIMETypes) == 0 && (!in.requiresOCIEncryption || manifest.MIMETypeSupportsEncryption(srcType)) { + return manifestConversionPlan{ // Anything goes; just use the original as is, do not try any conversions. + preferredMIMEType: srcType, + otherMIMETypeCandidates: []string{}, + }, nil } supportedByDest := map[string]struct{}{} for _, t := range destSupportedManifestMIMETypes { - if !requiresOciEncryption || manifest.MIMETypeSupportsEncryption(t) { + if !in.requiresOCIEncryption || manifest.MIMETypeSupportsEncryption(t) { supportedByDest[t] = struct{}{} } } @@ -79,13 +98,16 @@ func (ic *imageCopier) determineManifestConversion(ctx context.Context, destSupp if _, ok := supportedByDest[srcType]; ok { prioritizedTypes.append(srcType) } - if ic.cannotModifyManifestReason != "" { + if in.cannotModifyManifestReason != "" { // We could also drop this check and have the caller // make the choice; it is already doing that to an extent, to improve error // messages. But it is nice to hide the “if we can't modify, do no conversion” // special case in here; the caller can then worry (or not) only about a good UI. logrus.Debugf("We can't modify the manifest, hoping for the best...") - return srcType, []string{}, nil // Take our chances - FIXME? Or should we fail without trying? + return manifestConversionPlan{ // Take our chances - FIXME? Or should we fail without trying? + preferredMIMEType: srcType, + otherMIMETypeCandidates: []string{}, + }, nil } // Then use our list of preferred types. @@ -102,15 +124,17 @@ func (ic *imageCopier) determineManifestConversion(ctx context.Context, destSupp logrus.Debugf("Manifest has MIME type %s, ordered candidate list [%s]", srcType, strings.Join(prioritizedTypes.list, ", ")) if len(prioritizedTypes.list) == 0 { // Coverage: destSupportedManifestMIMETypes is not empty (or we would have exited in the “Anything goes” case above), so this should never happen. 
- return "", nil, errors.New("Internal error: no candidate MIME types") + return manifestConversionPlan{}, errors.New("Internal error: no candidate MIME types") } - preferredType := prioritizedTypes.list[0] - if preferredType != srcType { - ic.manifestUpdates.ManifestMIMEType = preferredType - } else { + res := manifestConversionPlan{ + preferredMIMEType: prioritizedTypes.list[0], + otherMIMETypeCandidates: prioritizedTypes.list[1:], + } + res.preferredMIMETypeNeedsConversion = res.preferredMIMEType != srcType + if !res.preferredMIMETypeNeedsConversion { logrus.Debugf("... will first try using the original manifest unmodified") } - return preferredType, prioritizedTypes.list[1:], nil + return res, nil } // isMultiImage returns true if img is a list of images diff --git a/vendor/github.com/containers/image/v5/directory/directory_transport.go b/vendor/github.com/containers/image/v5/directory/directory_transport.go index e542d888c..562404470 100644 --- a/vendor/github.com/containers/image/v5/directory/directory_transport.go +++ b/vendor/github.com/containers/image/v5/directory/directory_transport.go @@ -8,7 +8,7 @@ import ( "github.com/containers/image/v5/directory/explicitfilepath" "github.com/containers/image/v5/docker/reference" - "github.com/containers/image/v5/image" + "github.com/containers/image/v5/internal/image" "github.com/containers/image/v5/transports" "github.com/containers/image/v5/types" "github.com/opencontainers/go-digest" @@ -140,8 +140,7 @@ func (ref dirReference) PolicyConfigurationNamespaces() []string { // verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage. // WARNING: This may not do the right thing for a manifest list, see image.FromSource for details. func (ref dirReference) NewImage(ctx context.Context, sys *types.SystemContext) (types.ImageCloser, error) { - src := newImageSource(ref) - return image.FromSource(ctx, sys, src) + return image.FromReference(ctx, sys, ref) } // NewImageSource returns a types.ImageSource for this reference. diff --git a/vendor/github.com/containers/image/v5/docker/archive/transport.go b/vendor/github.com/containers/image/v5/docker/archive/transport.go index 9a48cb46c..f00b77930 100644 --- a/vendor/github.com/containers/image/v5/docker/archive/transport.go +++ b/vendor/github.com/containers/image/v5/docker/archive/transport.go @@ -8,7 +8,7 @@ import ( "github.com/containers/image/v5/docker/internal/tarfile" "github.com/containers/image/v5/docker/reference" - ctrImage "github.com/containers/image/v5/image" + ctrImage "github.com/containers/image/v5/internal/image" "github.com/containers/image/v5/transports" "github.com/containers/image/v5/types" "github.com/pkg/errors" @@ -185,11 +185,7 @@ func (ref archiveReference) PolicyConfigurationNamespaces() []string { // verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage. // WARNING: This may not do the right thing for a manifest list, see image.FromSource for details. func (ref archiveReference) NewImage(ctx context.Context, sys *types.SystemContext) (types.ImageCloser, error) { - src, err := newImageSource(ctx, sys, ref) - if err != nil { - return nil, err - } - return ctrImage.FromSource(ctx, sys, src) + return ctrImage.FromReference(ctx, sys, ref) } // NewImageSource returns a types.ImageSource for this reference. 
diff --git a/vendor/github.com/containers/image/v5/docker/daemon/daemon_transport.go b/vendor/github.com/containers/image/v5/docker/daemon/daemon_transport.go index 4e4ed6881..d75579784 100644 --- a/vendor/github.com/containers/image/v5/docker/daemon/daemon_transport.go +++ b/vendor/github.com/containers/image/v5/docker/daemon/daemon_transport.go @@ -6,7 +6,7 @@ import ( "github.com/containers/image/v5/docker/policyconfiguration" "github.com/containers/image/v5/docker/reference" - "github.com/containers/image/v5/image" + "github.com/containers/image/v5/internal/image" "github.com/containers/image/v5/transports" "github.com/containers/image/v5/types" "github.com/opencontainers/go-digest" @@ -195,11 +195,7 @@ func (ref daemonReference) PolicyConfigurationNamespaces() []string { // verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage. // WARNING: This may not do the right thing for a manifest list, see image.FromSource for details. func (ref daemonReference) NewImage(ctx context.Context, sys *types.SystemContext) (types.ImageCloser, error) { - src, err := newImageSource(ctx, sys, ref) - if err != nil { - return nil, err - } - return image.FromSource(ctx, sys, src) + return image.FromReference(ctx, sys, ref) } // NewImageSource returns a types.ImageSource for this reference. diff --git a/vendor/github.com/containers/image/v5/docker/docker_client.go b/vendor/github.com/containers/image/v5/docker/docker_client.go index daac45f87..29c256869 100644 --- a/vendor/github.com/containers/image/v5/docker/docker_client.go +++ b/vendor/github.com/containers/image/v5/docker/docker_client.go @@ -163,9 +163,8 @@ func newBearerTokenFromJSONBlob(blob []byte) (*bearerToken, error) { func serverDefault() *tls.Config { return &tls.Config{ // Avoid fallback to SSL protocols < TLS1.0 - MinVersion: tls.VersionTLS10, - PreferServerCipherSuites: true, - CipherSuites: tlsconfig.DefaultServerAcceptedCiphers, + MinVersion: tls.VersionTLS10, + CipherSuites: tlsconfig.DefaultServerAcceptedCiphers, } } diff --git a/vendor/github.com/containers/image/v5/docker/docker_image.go b/vendor/github.com/containers/image/v5/docker/docker_image.go index c84bb37d2..73687e86f 100644 --- a/vendor/github.com/containers/image/v5/docker/docker_image.go +++ b/vendor/github.com/containers/image/v5/docker/docker_image.go @@ -9,7 +9,7 @@ import ( "strings" "github.com/containers/image/v5/docker/reference" - "github.com/containers/image/v5/image" + "github.com/containers/image/v5/internal/image" "github.com/containers/image/v5/manifest" "github.com/containers/image/v5/types" "github.com/opencontainers/go-digest" diff --git a/vendor/github.com/containers/image/v5/image/docker_list.go b/vendor/github.com/containers/image/v5/image/docker_list.go deleted file mode 100644 index af78ac1df..000000000 --- a/vendor/github.com/containers/image/v5/image/docker_list.go +++ /dev/null @@ -1,34 +0,0 @@ -package image - -import ( - "context" - - "github.com/containers/image/v5/manifest" - "github.com/containers/image/v5/types" - "github.com/pkg/errors" -) - -func manifestSchema2FromManifestList(ctx context.Context, sys *types.SystemContext, src types.ImageSource, manblob []byte) (genericManifest, error) { - list, err := manifest.Schema2ListFromManifest(manblob) - if err != nil { - return nil, errors.Wrapf(err, "parsing schema2 manifest list") - } - targetManifestDigest, err := list.ChooseInstance(sys) - if err != nil { - return nil, errors.Wrapf(err, "choosing image instance") - } - manblob, mt, err := src.GetManifest(ctx, 
&targetManifestDigest) - if err != nil { - return nil, errors.Wrapf(err, "fetching target platform image selected from manifest list") - } - - matches, err := manifest.MatchesDigest(manblob, targetManifestDigest) - if err != nil { - return nil, errors.Wrap(err, "computing manifest digest") - } - if !matches { - return nil, errors.Errorf("Image manifest does not match selected manifest digest %s", targetManifestDigest) - } - - return manifestInstanceFromBlob(ctx, sys, src, manblob, mt) -} diff --git a/vendor/github.com/containers/image/v5/image/docker_schema1.go b/vendor/github.com/containers/image/v5/image/docker_schema1.go deleted file mode 100644 index 5f24970c3..000000000 --- a/vendor/github.com/containers/image/v5/image/docker_schema1.go +++ /dev/null @@ -1,248 +0,0 @@ -package image - -import ( - "context" - - "github.com/containers/image/v5/docker/reference" - "github.com/containers/image/v5/manifest" - "github.com/containers/image/v5/types" - "github.com/opencontainers/go-digest" - imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/pkg/errors" -) - -type manifestSchema1 struct { - m *manifest.Schema1 -} - -func manifestSchema1FromManifest(manifestBlob []byte) (genericManifest, error) { - m, err := manifest.Schema1FromManifest(manifestBlob) - if err != nil { - return nil, err - } - return &manifestSchema1{m: m}, nil -} - -// manifestSchema1FromComponents builds a new manifestSchema1 from the supplied data. -func manifestSchema1FromComponents(ref reference.Named, fsLayers []manifest.Schema1FSLayers, history []manifest.Schema1History, architecture string) (genericManifest, error) { - m, err := manifest.Schema1FromComponents(ref, fsLayers, history, architecture) - if err != nil { - return nil, err - } - return &manifestSchema1{m: m}, nil -} - -func (m *manifestSchema1) serialize() ([]byte, error) { - return m.m.Serialize() -} - -func (m *manifestSchema1) manifestMIMEType() string { - return manifest.DockerV2Schema1SignedMediaType -} - -// ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object. -// Note that the config object may not exist in the underlying storage in the return value of UpdatedImage! Use ConfigBlob() below. -func (m *manifestSchema1) ConfigInfo() types.BlobInfo { - return m.m.ConfigInfo() -} - -// ConfigBlob returns the blob described by ConfigInfo, iff ConfigInfo().Digest != ""; nil otherwise. -// The result is cached; it is OK to call this however often you need. -func (m *manifestSchema1) ConfigBlob(context.Context) ([]byte, error) { - return nil, nil -} - -// OCIConfig returns the image configuration as per OCI v1 image-spec. Information about -// layers in the resulting configuration isn't guaranteed to be returned to due how -// old image manifests work (docker v2s1 especially). -func (m *manifestSchema1) OCIConfig(ctx context.Context) (*imgspecv1.Image, error) { - v2s2, err := m.convertToManifestSchema2(ctx, &types.ManifestUpdateOptions{}) - if err != nil { - return nil, err - } - return v2s2.OCIConfig(ctx) -} - -// LayerInfos returns a list of BlobInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers). -// The Digest field is guaranteed to be provided; Size may be -1. -// WARNING: The list may contain duplicates, and they are semantically relevant. 
-func (m *manifestSchema1) LayerInfos() []types.BlobInfo { - return manifestLayerInfosToBlobInfos(m.m.LayerInfos()) -} - -// EmbeddedDockerReferenceConflicts whether a Docker reference embedded in the manifest, if any, conflicts with destination ref. -// It returns false if the manifest does not embed a Docker reference. -// (This embedding unfortunately happens for Docker schema1, please do not add support for this in any new formats.) -func (m *manifestSchema1) EmbeddedDockerReferenceConflicts(ref reference.Named) bool { - // This is a bit convoluted: We can’t just have a "get embedded docker reference" method - // and have the “does it conflict” logic in the generic copy code, because the manifest does not actually - // embed a full docker/distribution reference, but only the repo name and tag (without the host name). - // So we would have to provide a “return repo without host name, and tag” getter for the generic code, - // which would be very awkward. Instead, we do the matching here in schema1-specific code, and all the - // generic copy code needs to know about is reference.Named and that a manifest may need updating - // for some destinations. - name := reference.Path(ref) - var tag string - if tagged, isTagged := ref.(reference.NamedTagged); isTagged { - tag = tagged.Tag() - } else { - tag = "" - } - return m.m.Name != name || m.m.Tag != tag -} - -// Inspect returns various information for (skopeo inspect) parsed from the manifest and configuration. -func (m *manifestSchema1) Inspect(context.Context) (*types.ImageInspectInfo, error) { - return m.m.Inspect(nil) -} - -// UpdatedImageNeedsLayerDiffIDs returns true iff UpdatedImage(options) needs InformationOnly.LayerDiffIDs. -// This is a horribly specific interface, but computing InformationOnly.LayerDiffIDs can be very expensive to compute -// (most importantly it forces us to download the full layers even if they are already present at the destination). -func (m *manifestSchema1) UpdatedImageNeedsLayerDiffIDs(options types.ManifestUpdateOptions) bool { - return (options.ManifestMIMEType == manifest.DockerV2Schema2MediaType || options.ManifestMIMEType == imgspecv1.MediaTypeImageManifest) -} - -// UpdatedImage returns a types.Image modified according to options. -// This does not change the state of the original Image object. -func (m *manifestSchema1) UpdatedImage(ctx context.Context, options types.ManifestUpdateOptions) (types.Image, error) { - copy := manifestSchema1{m: manifest.Schema1Clone(m.m)} - - // We have 2 MIME types for schema 1, which are basically equivalent (even the un-"Signed" MIME type will be rejected if there isn’t a signature; so, - // handle conversions between them by doing nothing. 
- if options.ManifestMIMEType != manifest.DockerV2Schema1MediaType && options.ManifestMIMEType != manifest.DockerV2Schema1SignedMediaType { - converted, err := convertManifestIfRequiredWithUpdate(ctx, options, map[string]manifestConvertFn{ - imgspecv1.MediaTypeImageManifest: copy.convertToManifestOCI1, - manifest.DockerV2Schema2MediaType: copy.convertToManifestSchema2Generic, - }) - if err != nil { - return nil, err - } - - if converted != nil { - return converted, nil - } - } - - // No conversion required, update manifest - if options.LayerInfos != nil { - if err := copy.m.UpdateLayerInfos(options.LayerInfos); err != nil { - return nil, err - } - } - if options.EmbeddedDockerReference != nil { - copy.m.Name = reference.Path(options.EmbeddedDockerReference) - if tagged, isTagged := options.EmbeddedDockerReference.(reference.NamedTagged); isTagged { - copy.m.Tag = tagged.Tag() - } else { - copy.m.Tag = "" - } - } - - return memoryImageFromManifest(©), nil -} - -// convertToManifestSchema2Generic returns a genericManifest implementation converted to manifest.DockerV2Schema2MediaType. -// It may use options.InformationOnly and also adjust *options to be appropriate for editing the returned -// value. -// This does not change the state of the original manifestSchema1 object. -// -// We need this function just because a function returning an implementation of the genericManifest -// interface is not automatically assignable to a function type returning the genericManifest interface -func (m *manifestSchema1) convertToManifestSchema2Generic(ctx context.Context, options *types.ManifestUpdateOptions) (genericManifest, error) { - return m.convertToManifestSchema2(ctx, options) -} - -// convertToManifestSchema2 returns a genericManifest implementation converted to manifest.DockerV2Schema2MediaType. -// It may use options.InformationOnly and also adjust *options to be appropriate for editing the returned -// value. -// This does not change the state of the original manifestSchema1 object. -// -// Based on github.com/docker/docker/distribution/pull_v2.go -func (m *manifestSchema1) convertToManifestSchema2(_ context.Context, options *types.ManifestUpdateOptions) (*manifestSchema2, error) { - uploadedLayerInfos := options.InformationOnly.LayerInfos - layerDiffIDs := options.InformationOnly.LayerDiffIDs - - if len(m.m.ExtractedV1Compatibility) == 0 { - // What would this even mean?! Anyhow, the rest of the code depends on FSLayers[0] and ExtractedV1Compatibility[0] existing. 
- return nil, errors.Errorf("Cannot convert an image with 0 history entries to %s", manifest.DockerV2Schema2MediaType) - } - if len(m.m.ExtractedV1Compatibility) != len(m.m.FSLayers) { - return nil, errors.Errorf("Inconsistent schema 1 manifest: %d history entries, %d fsLayers entries", len(m.m.ExtractedV1Compatibility), len(m.m.FSLayers)) - } - if uploadedLayerInfos != nil && len(uploadedLayerInfos) != len(m.m.FSLayers) { - return nil, errors.Errorf("Internal error: uploaded %d blobs, but schema1 manifest has %d fsLayers", len(uploadedLayerInfos), len(m.m.FSLayers)) - } - if layerDiffIDs != nil && len(layerDiffIDs) != len(m.m.FSLayers) { - return nil, errors.Errorf("Internal error: collected %d DiffID values, but schema1 manifest has %d fsLayers", len(layerDiffIDs), len(m.m.FSLayers)) - } - - var convertedLayerUpdates []types.BlobInfo // Only used if options.LayerInfos != nil - if options.LayerInfos != nil { - if len(options.LayerInfos) != len(m.m.FSLayers) { - return nil, errors.Errorf("Error converting image: layer edits for %d layers vs %d existing layers", - len(options.LayerInfos), len(m.m.FSLayers)) - } - convertedLayerUpdates = []types.BlobInfo{} - } - - // Build a list of the diffIDs for the non-empty layers. - diffIDs := []digest.Digest{} - var layers []manifest.Schema2Descriptor - for v1Index := len(m.m.ExtractedV1Compatibility) - 1; v1Index >= 0; v1Index-- { - v2Index := (len(m.m.ExtractedV1Compatibility) - 1) - v1Index - - if !m.m.ExtractedV1Compatibility[v1Index].ThrowAway { - var size int64 - if uploadedLayerInfos != nil { - size = uploadedLayerInfos[v2Index].Size - } - var d digest.Digest - if layerDiffIDs != nil { - d = layerDiffIDs[v2Index] - } - layers = append(layers, manifest.Schema2Descriptor{ - MediaType: "application/vnd.docker.image.rootfs.diff.tar.gzip", - Size: size, - Digest: m.m.FSLayers[v1Index].BlobSum, - }) - if options.LayerInfos != nil { - convertedLayerUpdates = append(convertedLayerUpdates, options.LayerInfos[v2Index]) - } - diffIDs = append(diffIDs, d) - } - } - configJSON, err := m.m.ToSchema2Config(diffIDs) - if err != nil { - return nil, err - } - configDescriptor := manifest.Schema2Descriptor{ - MediaType: "application/vnd.docker.container.image.v1+json", - Size: int64(len(configJSON)), - Digest: digest.FromBytes(configJSON), - } - - if options.LayerInfos != nil { - options.LayerInfos = convertedLayerUpdates - } - return manifestSchema2FromComponents(configDescriptor, nil, configJSON, layers), nil -} - -// convertToManifestOCI1 returns a genericManifest implementation converted to imgspecv1.MediaTypeImageManifest. -// It may use options.InformationOnly and also adjust *options to be appropriate for editing the returned -// value. -// This does not change the state of the original manifestSchema1 object. 
-func (m *manifestSchema1) convertToManifestOCI1(ctx context.Context, options *types.ManifestUpdateOptions) (genericManifest, error) { - // We can't directly convert to OCI, but we can transitively convert via a Docker V2.2 Distribution manifest - m2, err := m.convertToManifestSchema2(ctx, options) - if err != nil { - return nil, err - } - - return m2.convertToManifestOCI1(ctx, options) -} - -// SupportsEncryption returns if encryption is supported for the manifest type -func (m *manifestSchema1) SupportsEncryption(context.Context) bool { - return false -} diff --git a/vendor/github.com/containers/image/v5/image/docker_schema2.go b/vendor/github.com/containers/image/v5/image/docker_schema2.go index b250a6b1d..e5a3b8991 100644 --- a/vendor/github.com/containers/image/v5/image/docker_schema2.go +++ b/vendor/github.com/containers/image/v5/image/docker_schema2.go @@ -1,400 +1,14 @@ package image import ( - "bytes" - "context" - "crypto/sha256" - "encoding/hex" - "encoding/json" - "fmt" - "strings" - - "github.com/containers/image/v5/docker/reference" - "github.com/containers/image/v5/internal/iolimits" - "github.com/containers/image/v5/manifest" - "github.com/containers/image/v5/pkg/blobinfocache/none" - "github.com/containers/image/v5/types" - "github.com/opencontainers/go-digest" - imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" + "github.com/containers/image/v5/internal/image" ) // GzippedEmptyLayer is a gzip-compressed version of an empty tar file (1024 NULL bytes) // This comes from github.com/docker/distribution/manifest/schema1/config_builder.go; there is // a non-zero embedded timestamp; we could zero that, but that would just waste storage space // in registries, so let’s use the same values. -var GzippedEmptyLayer = []byte{ - 31, 139, 8, 0, 0, 9, 110, 136, 0, 255, 98, 24, 5, 163, 96, 20, 140, 88, - 0, 8, 0, 0, 255, 255, 46, 175, 181, 239, 0, 4, 0, 0, -} +var GzippedEmptyLayer = image.GzippedEmptyLayer // GzippedEmptyLayerDigest is a digest of GzippedEmptyLayer -const GzippedEmptyLayerDigest = digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4") - -type manifestSchema2 struct { - src types.ImageSource // May be nil if configBlob is not nil - configBlob []byte // If set, corresponds to contents of ConfigDescriptor. - m *manifest.Schema2 -} - -func manifestSchema2FromManifest(src types.ImageSource, manifestBlob []byte) (genericManifest, error) { - m, err := manifest.Schema2FromManifest(manifestBlob) - if err != nil { - return nil, err - } - return &manifestSchema2{ - src: src, - m: m, - }, nil -} - -// manifestSchema2FromComponents builds a new manifestSchema2 from the supplied data: -func manifestSchema2FromComponents(config manifest.Schema2Descriptor, src types.ImageSource, configBlob []byte, layers []manifest.Schema2Descriptor) *manifestSchema2 { - return &manifestSchema2{ - src: src, - configBlob: configBlob, - m: manifest.Schema2FromComponents(config, layers), - } -} - -func (m *manifestSchema2) serialize() ([]byte, error) { - return m.m.Serialize() -} - -func (m *manifestSchema2) manifestMIMEType() string { - return m.m.MediaType -} - -// ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object. -// Note that the config object may not exist in the underlying storage in the return value of UpdatedImage! Use ConfigBlob() below. 
-func (m *manifestSchema2) ConfigInfo() types.BlobInfo { - return m.m.ConfigInfo() -} - -// OCIConfig returns the image configuration as per OCI v1 image-spec. Information about -// layers in the resulting configuration isn't guaranteed to be returned to due how -// old image manifests work (docker v2s1 especially). -func (m *manifestSchema2) OCIConfig(ctx context.Context) (*imgspecv1.Image, error) { - configBlob, err := m.ConfigBlob(ctx) - if err != nil { - return nil, err - } - // docker v2s2 and OCI v1 are mostly compatible but v2s2 contains more fields - // than OCI v1. This unmarshal makes sure we drop docker v2s2 - // fields that aren't needed in OCI v1. - configOCI := &imgspecv1.Image{} - if err := json.Unmarshal(configBlob, configOCI); err != nil { - return nil, err - } - return configOCI, nil -} - -// ConfigBlob returns the blob described by ConfigInfo, iff ConfigInfo().Digest != ""; nil otherwise. -// The result is cached; it is OK to call this however often you need. -func (m *manifestSchema2) ConfigBlob(ctx context.Context) ([]byte, error) { - if m.configBlob == nil { - if m.src == nil { - return nil, errors.Errorf("Internal error: neither src nor configBlob set in manifestSchema2") - } - stream, _, err := m.src.GetBlob(ctx, manifest.BlobInfoFromSchema2Descriptor(m.m.ConfigDescriptor), none.NoCache) - if err != nil { - return nil, err - } - defer stream.Close() - blob, err := iolimits.ReadAtMost(stream, iolimits.MaxConfigBodySize) - if err != nil { - return nil, err - } - computedDigest := digest.FromBytes(blob) - if computedDigest != m.m.ConfigDescriptor.Digest { - return nil, errors.Errorf("Download config.json digest %s does not match expected %s", computedDigest, m.m.ConfigDescriptor.Digest) - } - m.configBlob = blob - } - return m.configBlob, nil -} - -// LayerInfos returns a list of BlobInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers). -// The Digest field is guaranteed to be provided; Size may be -1. -// WARNING: The list may contain duplicates, and they are semantically relevant. -func (m *manifestSchema2) LayerInfos() []types.BlobInfo { - return manifestLayerInfosToBlobInfos(m.m.LayerInfos()) -} - -// EmbeddedDockerReferenceConflicts whether a Docker reference embedded in the manifest, if any, conflicts with destination ref. -// It returns false if the manifest does not embed a Docker reference. -// (This embedding unfortunately happens for Docker schema1, please do not add support for this in any new formats.) -func (m *manifestSchema2) EmbeddedDockerReferenceConflicts(ref reference.Named) bool { - return false -} - -// Inspect returns various information for (skopeo inspect) parsed from the manifest and configuration. -func (m *manifestSchema2) Inspect(ctx context.Context) (*types.ImageInspectInfo, error) { - getter := func(info types.BlobInfo) ([]byte, error) { - if info.Digest != m.ConfigInfo().Digest { - // Shouldn't ever happen - return nil, errors.New("asked for a different config blob") - } - config, err := m.ConfigBlob(ctx) - if err != nil { - return nil, err - } - return config, nil - } - return m.m.Inspect(getter) -} - -// UpdatedImageNeedsLayerDiffIDs returns true iff UpdatedImage(options) needs InformationOnly.LayerDiffIDs. -// This is a horribly specific interface, but computing InformationOnly.LayerDiffIDs can be very expensive to compute -// (most importantly it forces us to download the full layers even if they are already present at the destination). 
-func (m *manifestSchema2) UpdatedImageNeedsLayerDiffIDs(options types.ManifestUpdateOptions) bool { - return false -} - -// UpdatedImage returns a types.Image modified according to options. -// This does not change the state of the original Image object. -// The returned error will be a manifest.ManifestLayerCompressionIncompatibilityError -// if the CompressionOperation and CompressionAlgorithm specified in one or more -// options.LayerInfos items is anything other than gzip. -func (m *manifestSchema2) UpdatedImage(ctx context.Context, options types.ManifestUpdateOptions) (types.Image, error) { - copy := manifestSchema2{ // NOTE: This is not a deep copy, it still shares slices etc. - src: m.src, - configBlob: m.configBlob, - m: manifest.Schema2Clone(m.m), - } - - converted, err := convertManifestIfRequiredWithUpdate(ctx, options, map[string]manifestConvertFn{ - manifest.DockerV2Schema1MediaType: copy.convertToManifestSchema1, - manifest.DockerV2Schema1SignedMediaType: copy.convertToManifestSchema1, - imgspecv1.MediaTypeImageManifest: copy.convertToManifestOCI1, - }) - if err != nil { - return nil, err - } - - if converted != nil { - return converted, nil - } - - // No conversion required, update manifest - if options.LayerInfos != nil { - if err := copy.m.UpdateLayerInfos(options.LayerInfos); err != nil { - return nil, err - } - } - // Ignore options.EmbeddedDockerReference: it may be set when converting from schema1 to schema2, but we really don't care. - - return memoryImageFromManifest(©), nil -} - -func oci1DescriptorFromSchema2Descriptor(d manifest.Schema2Descriptor) imgspecv1.Descriptor { - return imgspecv1.Descriptor{ - MediaType: d.MediaType, - Size: d.Size, - Digest: d.Digest, - URLs: d.URLs, - } -} - -// convertToManifestOCI1 returns a genericManifest implementation converted to imgspecv1.MediaTypeImageManifest. -// It may use options.InformationOnly and also adjust *options to be appropriate for editing the returned -// value. -// This does not change the state of the original manifestSchema2 object. 
-func (m *manifestSchema2) convertToManifestOCI1(ctx context.Context, _ *types.ManifestUpdateOptions) (genericManifest, error) { - configOCI, err := m.OCIConfig(ctx) - if err != nil { - return nil, err - } - configOCIBytes, err := json.Marshal(configOCI) - if err != nil { - return nil, err - } - - config := imgspecv1.Descriptor{ - MediaType: imgspecv1.MediaTypeImageConfig, - Size: int64(len(configOCIBytes)), - Digest: digest.FromBytes(configOCIBytes), - } - - layers := make([]imgspecv1.Descriptor, len(m.m.LayersDescriptors)) - for idx := range layers { - layers[idx] = oci1DescriptorFromSchema2Descriptor(m.m.LayersDescriptors[idx]) - switch m.m.LayersDescriptors[idx].MediaType { - case manifest.DockerV2Schema2ForeignLayerMediaType: - layers[idx].MediaType = imgspecv1.MediaTypeImageLayerNonDistributable - case manifest.DockerV2Schema2ForeignLayerMediaTypeGzip: - layers[idx].MediaType = imgspecv1.MediaTypeImageLayerNonDistributableGzip - case manifest.DockerV2SchemaLayerMediaTypeUncompressed: - layers[idx].MediaType = imgspecv1.MediaTypeImageLayer - case manifest.DockerV2Schema2LayerMediaType: - layers[idx].MediaType = imgspecv1.MediaTypeImageLayerGzip - default: - return nil, fmt.Errorf("Unknown media type during manifest conversion: %q", m.m.LayersDescriptors[idx].MediaType) - } - } - - return manifestOCI1FromComponents(config, m.src, configOCIBytes, layers), nil -} - -// convertToManifestSchema1 returns a genericManifest implementation converted to manifest.DockerV2Schema1{Signed,}MediaType. -// It may use options.InformationOnly and also adjust *options to be appropriate for editing the returned -// value. -// This does not change the state of the original manifestSchema2 object. -// -// Based on docker/distribution/manifest/schema1/config_builder.go -func (m *manifestSchema2) convertToManifestSchema1(ctx context.Context, options *types.ManifestUpdateOptions) (genericManifest, error) { - dest := options.InformationOnly.Destination - - var convertedLayerUpdates []types.BlobInfo // Only used if options.LayerInfos != nil - if options.LayerInfos != nil { - if len(options.LayerInfos) != len(m.m.LayersDescriptors) { - return nil, fmt.Errorf("Error converting image: layer edits for %d layers vs %d existing layers", - len(options.LayerInfos), len(m.m.LayersDescriptors)) - } - convertedLayerUpdates = []types.BlobInfo{} - } - - configBytes, err := m.ConfigBlob(ctx) - if err != nil { - return nil, err - } - imageConfig := &manifest.Schema2Image{} - if err := json.Unmarshal(configBytes, imageConfig); err != nil { - return nil, err - } - - // Build fsLayers and History, discarding all configs. We will patch the top-level config in later. - fsLayers := make([]manifest.Schema1FSLayers, len(imageConfig.History)) - history := make([]manifest.Schema1History, len(imageConfig.History)) - nonemptyLayerIndex := 0 - var parentV1ID string // Set in the loop - v1ID := "" - haveGzippedEmptyLayer := false - if len(imageConfig.History) == 0 { - // What would this even mean?! Anyhow, the rest of the code depends on fsLayers[0] and history[0] existing. 
- return nil, errors.Errorf("Cannot convert an image with 0 history entries to %s", manifest.DockerV2Schema1SignedMediaType) - } - for v2Index, historyEntry := range imageConfig.History { - parentV1ID = v1ID - v1Index := len(imageConfig.History) - 1 - v2Index - - var blobDigest digest.Digest - if historyEntry.EmptyLayer { - emptyLayerBlobInfo := types.BlobInfo{Digest: GzippedEmptyLayerDigest, Size: int64(len(GzippedEmptyLayer))} - - if !haveGzippedEmptyLayer { - logrus.Debugf("Uploading empty layer during conversion to schema 1") - // Ideally we should update the relevant BlobInfoCache about this layer, but that would require passing it down here, - // and anyway this blob is so small that it’s easier to just copy it than to worry about figuring out another location where to get it. - info, err := dest.PutBlob(ctx, bytes.NewReader(GzippedEmptyLayer), emptyLayerBlobInfo, none.NoCache, false) - if err != nil { - return nil, errors.Wrap(err, "uploading empty layer") - } - if info.Digest != emptyLayerBlobInfo.Digest { - return nil, errors.Errorf("Internal error: Uploaded empty layer has digest %#v instead of %s", info.Digest, emptyLayerBlobInfo.Digest) - } - haveGzippedEmptyLayer = true - } - if options.LayerInfos != nil { - convertedLayerUpdates = append(convertedLayerUpdates, emptyLayerBlobInfo) - } - blobDigest = emptyLayerBlobInfo.Digest - } else { - if nonemptyLayerIndex >= len(m.m.LayersDescriptors) { - return nil, errors.Errorf("Invalid image configuration, needs more than the %d distributed layers", len(m.m.LayersDescriptors)) - } - if options.LayerInfos != nil { - convertedLayerUpdates = append(convertedLayerUpdates, options.LayerInfos[nonemptyLayerIndex]) - } - blobDigest = m.m.LayersDescriptors[nonemptyLayerIndex].Digest - nonemptyLayerIndex++ - } - - // AFAICT pull ignores these ID values, at least nowadays, so we could use anything unique, including a simple counter. Use what Docker uses for cargo-cult consistency. - v, err := v1IDFromBlobDigestAndComponents(blobDigest, parentV1ID) - if err != nil { - return nil, err - } - v1ID = v - - fakeImage := manifest.Schema1V1Compatibility{ - ID: v1ID, - Parent: parentV1ID, - Comment: historyEntry.Comment, - Created: historyEntry.Created, - Author: historyEntry.Author, - ThrowAway: historyEntry.EmptyLayer, - } - fakeImage.ContainerConfig.Cmd = []string{historyEntry.CreatedBy} - v1CompatibilityBytes, err := json.Marshal(&fakeImage) - if err != nil { - return nil, errors.Errorf("Internal error: Error creating v1compatibility for %#v", fakeImage) - } - - fsLayers[v1Index] = manifest.Schema1FSLayers{BlobSum: blobDigest} - history[v1Index] = manifest.Schema1History{V1Compatibility: string(v1CompatibilityBytes)} - // Note that parentV1ID of the top layer is preserved when exiting this loop - } - - // Now patch in real configuration for the top layer (v1Index == 0) - v1ID, err = v1IDFromBlobDigestAndComponents(fsLayers[0].BlobSum, parentV1ID, string(configBytes)) // See above WRT v1ID value generation and cargo-cult consistency. 
- if err != nil { - return nil, err - } - v1Config, err := v1ConfigFromConfigJSON(configBytes, v1ID, parentV1ID, imageConfig.History[len(imageConfig.History)-1].EmptyLayer) - if err != nil { - return nil, err - } - history[0].V1Compatibility = string(v1Config) - - if options.LayerInfos != nil { - options.LayerInfos = convertedLayerUpdates - } - m1, err := manifestSchema1FromComponents(dest.Reference().DockerReference(), fsLayers, history, imageConfig.Architecture) - if err != nil { - return nil, err // This should never happen, we should have created all the components correctly. - } - return m1, nil -} - -func v1IDFromBlobDigestAndComponents(blobDigest digest.Digest, others ...string) (string, error) { - if err := blobDigest.Validate(); err != nil { - return "", err - } - parts := append([]string{blobDigest.Hex()}, others...) - v1IDHash := sha256.Sum256([]byte(strings.Join(parts, " "))) - return hex.EncodeToString(v1IDHash[:]), nil -} - -func v1ConfigFromConfigJSON(configJSON []byte, v1ID, parentV1ID string, throwaway bool) ([]byte, error) { - // Preserve everything we don't specifically know about. - // (This must be a *json.RawMessage, even though *[]byte is fairly redundant, because only *RawMessage implements json.Marshaler.) - rawContents := map[string]*json.RawMessage{} - if err := json.Unmarshal(configJSON, &rawContents); err != nil { // We have already unmarshaled it before, using a more detailed schema?! - return nil, err - } - delete(rawContents, "rootfs") - delete(rawContents, "history") - - updates := map[string]interface{}{"id": v1ID} - if parentV1ID != "" { - updates["parent"] = parentV1ID - } - if throwaway { - updates["throwaway"] = throwaway - } - for field, value := range updates { - encoded, err := json.Marshal(value) - if err != nil { - return nil, err - } - rawContents[field] = (*json.RawMessage)(&encoded) - } - return json.Marshal(rawContents) -} - -// SupportsEncryption returns if encryption is supported for the manifest type -func (m *manifestSchema2) SupportsEncryption(context.Context) bool { - return false -} +const GzippedEmptyLayerDigest = image.GzippedEmptyLayerDigest diff --git a/vendor/github.com/containers/image/v5/image/manifest.go b/vendor/github.com/containers/image/v5/image/manifest.go deleted file mode 100644 index 36d70b5c2..000000000 --- a/vendor/github.com/containers/image/v5/image/manifest.go +++ /dev/null @@ -1,113 +0,0 @@ -package image - -import ( - "context" - "fmt" - - "github.com/containers/image/v5/docker/reference" - "github.com/containers/image/v5/manifest" - "github.com/containers/image/v5/types" - imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/pkg/errors" -) - -// genericManifest is an interface for parsing, modifying image manifests and related data. -// Note that the public methods are intended to be a subset of types.Image -// so that embedding a genericManifest into structs works. -// will support v1 one day... -type genericManifest interface { - serialize() ([]byte, error) - manifestMIMEType() string - // ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object. - // Note that the config object may not exist in the underlying storage in the return value of UpdatedImage! Use ConfigBlob() below. - ConfigInfo() types.BlobInfo - // ConfigBlob returns the blob described by ConfigInfo, iff ConfigInfo().Digest != ""; nil otherwise. - // The result is cached; it is OK to call this however often you need. 
- ConfigBlob(context.Context) ([]byte, error) - // OCIConfig returns the image configuration as per OCI v1 image-spec. Information about - // layers in the resulting configuration isn't guaranteed to be returned to due how - // old image manifests work (docker v2s1 especially). - OCIConfig(context.Context) (*imgspecv1.Image, error) - // LayerInfos returns a list of BlobInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers). - // The Digest field is guaranteed to be provided; Size may be -1. - // WARNING: The list may contain duplicates, and they are semantically relevant. - LayerInfos() []types.BlobInfo - // EmbeddedDockerReferenceConflicts whether a Docker reference embedded in the manifest, if any, conflicts with destination ref. - // It returns false if the manifest does not embed a Docker reference. - // (This embedding unfortunately happens for Docker schema1, please do not add support for this in any new formats.) - EmbeddedDockerReferenceConflicts(ref reference.Named) bool - // Inspect returns various information for (skopeo inspect) parsed from the manifest and configuration. - Inspect(context.Context) (*types.ImageInspectInfo, error) - // UpdatedImageNeedsLayerDiffIDs returns true iff UpdatedImage(options) needs InformationOnly.LayerDiffIDs. - // This is a horribly specific interface, but computing InformationOnly.LayerDiffIDs can be very expensive to compute - // (most importantly it forces us to download the full layers even if they are already present at the destination). - UpdatedImageNeedsLayerDiffIDs(options types.ManifestUpdateOptions) bool - // UpdatedImage returns a types.Image modified according to options. - // This does not change the state of the original Image object. - UpdatedImage(ctx context.Context, options types.ManifestUpdateOptions) (types.Image, error) - // SupportsEncryption returns if encryption is supported for the manifest type - // - // Deprecated: Initially used to determine if a manifest can be copied from a source manifest type since - // the process of updating a manifest between different manifest types was to update then convert. - // This resulted in some fields in the update being lost. This has been fixed by: https://github.com/containers/image/pull/836 - SupportsEncryption(ctx context.Context) bool -} - -// manifestInstanceFromBlob returns a genericManifest implementation for (manblob, mt) in src. -// If manblob is a manifest list, it implicitly chooses an appropriate image from the list. -func manifestInstanceFromBlob(ctx context.Context, sys *types.SystemContext, src types.ImageSource, manblob []byte, mt string) (genericManifest, error) { - switch manifest.NormalizedMIMEType(mt) { - case manifest.DockerV2Schema1MediaType, manifest.DockerV2Schema1SignedMediaType: - return manifestSchema1FromManifest(manblob) - case imgspecv1.MediaTypeImageManifest: - return manifestOCI1FromManifest(src, manblob) - case manifest.DockerV2Schema2MediaType: - return manifestSchema2FromManifest(src, manblob) - case manifest.DockerV2ListMediaType: - return manifestSchema2FromManifestList(ctx, sys, src, manblob) - case imgspecv1.MediaTypeImageIndex: - return manifestOCI1FromImageIndex(ctx, sys, src, manblob) - default: // Note that this may not be reachable, manifest.NormalizedMIMEType has a default for unknown values. - return nil, fmt.Errorf("Unimplemented manifest MIME type %s", mt) - } -} - -// manifestLayerInfosToBlobInfos extracts a []types.BlobInfo from a []manifest.LayerInfo. 
-func manifestLayerInfosToBlobInfos(layers []manifest.LayerInfo) []types.BlobInfo { - blobs := make([]types.BlobInfo, len(layers)) - for i, layer := range layers { - blobs[i] = layer.BlobInfo - } - return blobs -} - -// manifestConvertFn (a method of genericManifest object) returns a genericManifest implementation -// converted to a specific manifest MIME type. -// It may use options.InformationOnly and also adjust *options to be appropriate for editing the returned -// value. -// This does not change the state of the original genericManifest object. -type manifestConvertFn func(ctx context.Context, options *types.ManifestUpdateOptions) (genericManifest, error) - -// convertManifestIfRequiredWithUpdate will run conversion functions of a manifest if -// required and re-apply the options to the converted type. -// It returns (nil, nil) if no conversion was requested. -func convertManifestIfRequiredWithUpdate(ctx context.Context, options types.ManifestUpdateOptions, converters map[string]manifestConvertFn) (types.Image, error) { - if options.ManifestMIMEType == "" { - return nil, nil - } - - converter, ok := converters[options.ManifestMIMEType] - if !ok { - return nil, errors.Errorf("Unsupported conversion type: %v", options.ManifestMIMEType) - } - - optionsCopy := options - convertedManifest, err := converter(ctx, &optionsCopy) - if err != nil { - return nil, err - } - convertedImage := memoryImageFromManifest(convertedManifest) - - optionsCopy.ManifestMIMEType = "" - return convertedImage.UpdatedImage(ctx, optionsCopy) -} diff --git a/vendor/github.com/containers/image/v5/image/memory.go b/vendor/github.com/containers/image/v5/image/memory.go deleted file mode 100644 index 4c96b37d8..000000000 --- a/vendor/github.com/containers/image/v5/image/memory.go +++ /dev/null @@ -1,64 +0,0 @@ -package image - -import ( - "context" - - "github.com/containers/image/v5/types" - "github.com/pkg/errors" -) - -// memoryImage is a mostly-implementation of types.Image assembled from data -// created in memory, used primarily as a return value of types.Image.UpdatedImage -// as a way to carry various structured information in a type-safe and easy-to-use way. -// Note that this _only_ carries the immediate metadata; it is _not_ a stand-alone -// collection of all related information, e.g. there is no way to get layer blobs -// from a memoryImage. -type memoryImage struct { - genericManifest - serializedManifest []byte // A private cache for Manifest() -} - -func memoryImageFromManifest(m genericManifest) types.Image { - return &memoryImage{ - genericManifest: m, - serializedManifest: nil, - } -} - -// Reference returns the reference used to set up this source, _as specified by the user_ -// (not as the image itself, or its underlying storage, claims). This can be used e.g. to determine which public keys are trusted for this image. -func (i *memoryImage) Reference() types.ImageReference { - // It would really be inappropriate to return the ImageReference of the image this was based on. - return nil -} - -// Size returns the size of the image as stored, if known, or -1 if not. -func (i *memoryImage) Size() (int64, error) { - return -1, nil -} - -// Manifest is like ImageSource.GetManifest, but the result is cached; it is OK to call this however often you need. 
-func (i *memoryImage) Manifest(ctx context.Context) ([]byte, string, error) { - if i.serializedManifest == nil { - m, err := i.genericManifest.serialize() - if err != nil { - return nil, "", err - } - i.serializedManifest = m - } - return i.serializedManifest, i.genericManifest.manifestMIMEType(), nil -} - -// Signatures is like ImageSource.GetSignatures, but the result is cached; it is OK to call this however often you need. -func (i *memoryImage) Signatures(ctx context.Context) ([][]byte, error) { - // Modifying an image invalidates signatures; a caller asking the updated image for signatures - // is probably confused. - return nil, errors.New("Internal error: Image.Signatures() is not supported for images modified in memory") -} - -// LayerInfosForCopy returns an updated set of layer blob information which may not match the manifest. -// The Digest field is guaranteed to be provided; Size may be -1. -// WARNING: The list may contain duplicates, and they are semantically relevant. -func (i *memoryImage) LayerInfosForCopy(ctx context.Context) ([]types.BlobInfo, error) { - return nil, nil -} diff --git a/vendor/github.com/containers/image/v5/image/oci.go b/vendor/github.com/containers/image/v5/image/oci.go deleted file mode 100644 index 58e9c03ba..000000000 --- a/vendor/github.com/containers/image/v5/image/oci.go +++ /dev/null @@ -1,248 +0,0 @@ -package image - -import ( - "context" - "encoding/json" - "fmt" - - "github.com/containers/image/v5/docker/reference" - "github.com/containers/image/v5/internal/iolimits" - "github.com/containers/image/v5/manifest" - "github.com/containers/image/v5/pkg/blobinfocache/none" - "github.com/containers/image/v5/types" - "github.com/opencontainers/go-digest" - imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/pkg/errors" -) - -type manifestOCI1 struct { - src types.ImageSource // May be nil if configBlob is not nil - configBlob []byte // If set, corresponds to contents of m.Config. - m *manifest.OCI1 -} - -func manifestOCI1FromManifest(src types.ImageSource, manifestBlob []byte) (genericManifest, error) { - m, err := manifest.OCI1FromManifest(manifestBlob) - if err != nil { - return nil, err - } - return &manifestOCI1{ - src: src, - m: m, - }, nil -} - -// manifestOCI1FromComponents builds a new manifestOCI1 from the supplied data: -func manifestOCI1FromComponents(config imgspecv1.Descriptor, src types.ImageSource, configBlob []byte, layers []imgspecv1.Descriptor) genericManifest { - return &manifestOCI1{ - src: src, - configBlob: configBlob, - m: manifest.OCI1FromComponents(config, layers), - } -} - -func (m *manifestOCI1) serialize() ([]byte, error) { - return m.m.Serialize() -} - -func (m *manifestOCI1) manifestMIMEType() string { - return imgspecv1.MediaTypeImageManifest -} - -// ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object. -// Note that the config object may not exist in the underlying storage in the return value of UpdatedImage! Use ConfigBlob() below. -func (m *manifestOCI1) ConfigInfo() types.BlobInfo { - return m.m.ConfigInfo() -} - -// ConfigBlob returns the blob described by ConfigInfo, iff ConfigInfo().Digest != ""; nil otherwise. -// The result is cached; it is OK to call this however often you need. 
-func (m *manifestOCI1) ConfigBlob(ctx context.Context) ([]byte, error) { - if m.configBlob == nil { - if m.src == nil { - return nil, errors.Errorf("Internal error: neither src nor configBlob set in manifestOCI1") - } - stream, _, err := m.src.GetBlob(ctx, manifest.BlobInfoFromOCI1Descriptor(m.m.Config), none.NoCache) - if err != nil { - return nil, err - } - defer stream.Close() - blob, err := iolimits.ReadAtMost(stream, iolimits.MaxConfigBodySize) - if err != nil { - return nil, err - } - computedDigest := digest.FromBytes(blob) - if computedDigest != m.m.Config.Digest { - return nil, errors.Errorf("Download config.json digest %s does not match expected %s", computedDigest, m.m.Config.Digest) - } - m.configBlob = blob - } - return m.configBlob, nil -} - -// OCIConfig returns the image configuration as per OCI v1 image-spec. Information about -// layers in the resulting configuration isn't guaranteed to be returned to due how -// old image manifests work (docker v2s1 especially). -func (m *manifestOCI1) OCIConfig(ctx context.Context) (*imgspecv1.Image, error) { - cb, err := m.ConfigBlob(ctx) - if err != nil { - return nil, err - } - configOCI := &imgspecv1.Image{} - if err := json.Unmarshal(cb, configOCI); err != nil { - return nil, err - } - return configOCI, nil -} - -// LayerInfos returns a list of BlobInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers). -// The Digest field is guaranteed to be provided; Size may be -1. -// WARNING: The list may contain duplicates, and they are semantically relevant. -func (m *manifestOCI1) LayerInfos() []types.BlobInfo { - return manifestLayerInfosToBlobInfos(m.m.LayerInfos()) -} - -// EmbeddedDockerReferenceConflicts whether a Docker reference embedded in the manifest, if any, conflicts with destination ref. -// It returns false if the manifest does not embed a Docker reference. -// (This embedding unfortunately happens for Docker schema1, please do not add support for this in any new formats.) -func (m *manifestOCI1) EmbeddedDockerReferenceConflicts(ref reference.Named) bool { - return false -} - -// Inspect returns various information for (skopeo inspect) parsed from the manifest and configuration. -func (m *manifestOCI1) Inspect(ctx context.Context) (*types.ImageInspectInfo, error) { - getter := func(info types.BlobInfo) ([]byte, error) { - if info.Digest != m.ConfigInfo().Digest { - // Shouldn't ever happen - return nil, errors.New("asked for a different config blob") - } - config, err := m.ConfigBlob(ctx) - if err != nil { - return nil, err - } - return config, nil - } - return m.m.Inspect(getter) -} - -// UpdatedImageNeedsLayerDiffIDs returns true iff UpdatedImage(options) needs InformationOnly.LayerDiffIDs. -// This is a horribly specific interface, but computing InformationOnly.LayerDiffIDs can be very expensive to compute -// (most importantly it forces us to download the full layers even if they are already present at the destination). -func (m *manifestOCI1) UpdatedImageNeedsLayerDiffIDs(options types.ManifestUpdateOptions) bool { - return false -} - -// UpdatedImage returns a types.Image modified according to options. -// This does not change the state of the original Image object. -// The returned error will be a manifest.ManifestLayerCompressionIncompatibilityError -// if the combination of CompressionOperation and CompressionAlgorithm specified -// in one or more options.LayerInfos items indicates that a layer is compressed using -// an algorithm that is not allowed in OCI. 
-func (m *manifestOCI1) UpdatedImage(ctx context.Context, options types.ManifestUpdateOptions) (types.Image, error) { - copy := manifestOCI1{ // NOTE: This is not a deep copy, it still shares slices etc. - src: m.src, - configBlob: m.configBlob, - m: manifest.OCI1Clone(m.m), - } - - converted, err := convertManifestIfRequiredWithUpdate(ctx, options, map[string]manifestConvertFn{ - manifest.DockerV2Schema2MediaType: copy.convertToManifestSchema2Generic, - manifest.DockerV2Schema1MediaType: copy.convertToManifestSchema1, - manifest.DockerV2Schema1SignedMediaType: copy.convertToManifestSchema1, - }) - if err != nil { - return nil, err - } - - if converted != nil { - return converted, nil - } - - // No conversion required, update manifest - if options.LayerInfos != nil { - if err := copy.m.UpdateLayerInfos(options.LayerInfos); err != nil { - return nil, err - } - } - // Ignore options.EmbeddedDockerReference: it may be set when converting from schema1, but we really don't care. - - return memoryImageFromManifest(©), nil -} - -func schema2DescriptorFromOCI1Descriptor(d imgspecv1.Descriptor) manifest.Schema2Descriptor { - return manifest.Schema2Descriptor{ - MediaType: d.MediaType, - Size: d.Size, - Digest: d.Digest, - URLs: d.URLs, - } -} - -// convertToManifestSchema2Generic returns a genericManifest implementation converted to manifest.DockerV2Schema2MediaType. -// It may use options.InformationOnly and also adjust *options to be appropriate for editing the returned -// value. -// This does not change the state of the original manifestSchema1 object. -// -// We need this function just because a function returning an implementation of the genericManifest -// interface is not automatically assignable to a function type returning the genericManifest interface -func (m *manifestOCI1) convertToManifestSchema2Generic(ctx context.Context, options *types.ManifestUpdateOptions) (genericManifest, error) { - return m.convertToManifestSchema2(ctx, options) -} - -// convertToManifestSchema2 returns a genericManifest implementation converted to manifest.DockerV2Schema2MediaType. -// It may use options.InformationOnly and also adjust *options to be appropriate for editing the returned -// value. -// This does not change the state of the original manifestOCI1 object. -func (m *manifestOCI1) convertToManifestSchema2(_ context.Context, _ *types.ManifestUpdateOptions) (*manifestSchema2, error) { - // Create a copy of the descriptor. - config := schema2DescriptorFromOCI1Descriptor(m.m.Config) - - // The only difference between OCI and DockerSchema2 is the mediatypes. The - // media type of the manifest is handled by manifestSchema2FromComponents. 
- config.MediaType = manifest.DockerV2Schema2ConfigMediaType - - layers := make([]manifest.Schema2Descriptor, len(m.m.Layers)) - for idx := range layers { - layers[idx] = schema2DescriptorFromOCI1Descriptor(m.m.Layers[idx]) - switch layers[idx].MediaType { - case imgspecv1.MediaTypeImageLayerNonDistributable: - layers[idx].MediaType = manifest.DockerV2Schema2ForeignLayerMediaType - case imgspecv1.MediaTypeImageLayerNonDistributableGzip: - layers[idx].MediaType = manifest.DockerV2Schema2ForeignLayerMediaTypeGzip - case imgspecv1.MediaTypeImageLayerNonDistributableZstd: - return nil, fmt.Errorf("Error during manifest conversion: %q: zstd compression is not supported for docker images", layers[idx].MediaType) - case imgspecv1.MediaTypeImageLayer: - layers[idx].MediaType = manifest.DockerV2SchemaLayerMediaTypeUncompressed - case imgspecv1.MediaTypeImageLayerGzip: - layers[idx].MediaType = manifest.DockerV2Schema2LayerMediaType - case imgspecv1.MediaTypeImageLayerZstd: - return nil, fmt.Errorf("Error during manifest conversion: %q: zstd compression is not supported for docker images", layers[idx].MediaType) - default: - return nil, fmt.Errorf("Unknown media type during manifest conversion: %q", layers[idx].MediaType) - } - } - - // Rather than copying the ConfigBlob now, we just pass m.src to the - // translated manifest, since the only difference is the mediatype of - // descriptors there is no change to any blob stored in m.src. - return manifestSchema2FromComponents(config, m.src, nil, layers), nil -} - -// convertToManifestSchema1 returns a genericManifest implementation converted to manifest.DockerV2Schema1{Signed,}MediaType. -// It may use options.InformationOnly and also adjust *options to be appropriate for editing the returned -// value. -// This does not change the state of the original manifestOCI1 object. 
-func (m *manifestOCI1) convertToManifestSchema1(ctx context.Context, options *types.ManifestUpdateOptions) (genericManifest, error) { - // We can't directly convert to V1, but we can transitively convert via a V2 image - m2, err := m.convertToManifestSchema2(ctx, options) - if err != nil { - return nil, err - } - - return m2.convertToManifestSchema1(ctx, options) -} - -// SupportsEncryption returns if encryption is supported for the manifest type -func (m *manifestOCI1) SupportsEncryption(context.Context) bool { - return true -} diff --git a/vendor/github.com/containers/image/v5/image/oci_index.go b/vendor/github.com/containers/image/v5/image/oci_index.go deleted file mode 100644 index d6e6685b1..000000000 --- a/vendor/github.com/containers/image/v5/image/oci_index.go +++ /dev/null @@ -1,34 +0,0 @@ -package image - -import ( - "context" - - "github.com/containers/image/v5/manifest" - "github.com/containers/image/v5/types" - "github.com/pkg/errors" -) - -func manifestOCI1FromImageIndex(ctx context.Context, sys *types.SystemContext, src types.ImageSource, manblob []byte) (genericManifest, error) { - index, err := manifest.OCI1IndexFromManifest(manblob) - if err != nil { - return nil, errors.Wrapf(err, "parsing OCI1 index") - } - targetManifestDigest, err := index.ChooseInstance(sys) - if err != nil { - return nil, errors.Wrapf(err, "choosing image instance") - } - manblob, mt, err := src.GetManifest(ctx, &targetManifestDigest) - if err != nil { - return nil, errors.Wrapf(err, "fetching target platform image selected from image index") - } - - matches, err := manifest.MatchesDigest(manblob, targetManifestDigest) - if err != nil { - return nil, errors.Wrap(err, "computing manifest digest") - } - if !matches { - return nil, errors.Errorf("Image manifest does not match selected manifest digest %s", targetManifestDigest) - } - - return manifestInstanceFromBlob(ctx, sys, src, manblob, mt) -} diff --git a/vendor/github.com/containers/image/v5/image/sourced.go b/vendor/github.com/containers/image/v5/image/sourced.go index 3a016e1d0..2b7f6b144 100644 --- a/vendor/github.com/containers/image/v5/image/sourced.go +++ b/vendor/github.com/containers/image/v5/image/sourced.go @@ -6,17 +6,10 @@ package image import ( "context" + "github.com/containers/image/v5/internal/image" "github.com/containers/image/v5/types" ) -// imageCloser implements types.ImageCloser, perhaps allowing simple users -// to use a single object without having keep a reference to a types.ImageSource -// only to call types.ImageSource.Close(). -type imageCloser struct { - types.Image - src types.ImageSource -} - // FromSource returns a types.ImageCloser implementation for the default instance of source. // If source is a manifest list, .Manifest() still returns the manifest list, // but other methods transparently return data from an appropriate image instance. @@ -31,33 +24,7 @@ type imageCloser struct { // NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource, // verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage instead of calling this function. 
func FromSource(ctx context.Context, sys *types.SystemContext, src types.ImageSource) (types.ImageCloser, error) { - img, err := FromUnparsedImage(ctx, sys, UnparsedInstance(src, nil)) - if err != nil { - return nil, err - } - return &imageCloser{ - Image: img, - src: src, - }, nil -} - -func (ic *imageCloser) Close() error { - return ic.src.Close() -} - -// sourcedImage is a general set of utilities for working with container images, -// whatever is their underlying location (i.e. dockerImageSource-independent). -// Note the existence of skopeo/docker.Image: some instances of a `types.Image` -// may not be a `sourcedImage` directly. However, most users of `types.Image` -// do not care, and those who care about `skopeo/docker.Image` know they do. -type sourcedImage struct { - *UnparsedImage - manifestBlob []byte - manifestMIMEType string - // genericManifest contains data corresponding to manifestBlob. - // NOTE: The manifest may have been modified in the process; DO NOT reserialize and store genericManifest - // if you want to preserve the original manifest; use manifestBlob directly. - genericManifest + return image.FromSource(ctx, sys, src) } // FromUnparsedImage returns a types.Image implementation for unparsed. @@ -66,39 +33,5 @@ type sourcedImage struct { // // The Image must not be used after the underlying ImageSource is Close()d. func FromUnparsedImage(ctx context.Context, sys *types.SystemContext, unparsed *UnparsedImage) (types.Image, error) { - // Note that the input parameter above is specifically *image.UnparsedImage, not types.UnparsedImage: - // we want to be able to use unparsed.src. We could make that an explicit interface, but, well, - // this is the only UnparsedImage implementation around, anyway. - - // NOTE: It is essential for signature verification that all parsing done in this object happens on the same manifest which is returned by unparsed.Manifest(). - manifestBlob, manifestMIMEType, err := unparsed.Manifest(ctx) - if err != nil { - return nil, err - } - - parsedManifest, err := manifestInstanceFromBlob(ctx, sys, unparsed.src, manifestBlob, manifestMIMEType) - if err != nil { - return nil, err - } - - return &sourcedImage{ - UnparsedImage: unparsed, - manifestBlob: manifestBlob, - manifestMIMEType: manifestMIMEType, - genericManifest: parsedManifest, - }, nil -} - -// Size returns the size of the image as stored, if it's known, or -1 if it isn't. -func (i *sourcedImage) Size() (int64, error) { - return -1, nil -} - -// Manifest overrides the UnparsedImage.Manifest to always use the fields which we have already fetched. 
-func (i *sourcedImage) Manifest(ctx context.Context) ([]byte, string, error) { - return i.manifestBlob, i.manifestMIMEType, nil -} - -func (i *sourcedImage) LayerInfosForCopy(ctx context.Context) ([]types.BlobInfo, error) { - return i.UnparsedImage.src.LayerInfosForCopy(ctx, i.UnparsedImage.instanceDigest) + return image.FromUnparsedImage(ctx, sys, unparsed) } diff --git a/vendor/github.com/containers/image/v5/image/unparsed.go b/vendor/github.com/containers/image/v5/image/unparsed.go index c64852f72..123f6ce6f 100644 --- a/vendor/github.com/containers/image/v5/image/unparsed.go +++ b/vendor/github.com/containers/image/v5/image/unparsed.go @@ -1,95 +1,19 @@ package image import ( - "context" - - "github.com/containers/image/v5/docker/reference" - "github.com/containers/image/v5/manifest" + "github.com/containers/image/v5/internal/image" "github.com/containers/image/v5/types" "github.com/opencontainers/go-digest" - "github.com/pkg/errors" ) // UnparsedImage implements types.UnparsedImage . // An UnparsedImage is a pair of (ImageSource, instance digest); it can represent either a manifest list or a single image instance. -type UnparsedImage struct { - src types.ImageSource - instanceDigest *digest.Digest - cachedManifest []byte // A private cache for Manifest(); nil if not yet known. - // A private cache for Manifest(), may be the empty string if guessing failed. - // Valid iff cachedManifest is not nil. - cachedManifestMIMEType string - cachedSignatures [][]byte // A private cache for Signatures(); nil if not yet known. -} +type UnparsedImage = image.UnparsedImage // UnparsedInstance returns a types.UnparsedImage implementation for (source, instanceDigest). // If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve (when the primary manifest is a manifest list). // // The UnparsedImage must not be used after the underlying ImageSource is Close()d. func UnparsedInstance(src types.ImageSource, instanceDigest *digest.Digest) *UnparsedImage { - return &UnparsedImage{ - src: src, - instanceDigest: instanceDigest, - } -} - -// Reference returns the reference used to set up this source, _as specified by the user_ -// (not as the image itself, or its underlying storage, claims). This can be used e.g. to determine which public keys are trusted for this image. -func (i *UnparsedImage) Reference() types.ImageReference { - // Note that this does not depend on instanceDigest; e.g. all instances within a manifest list need to be signed with the manifest list identity. - return i.src.Reference() -} - -// Manifest is like ImageSource.GetManifest, but the result is cached; it is OK to call this however often you need. -func (i *UnparsedImage) Manifest(ctx context.Context) ([]byte, string, error) { - if i.cachedManifest == nil { - m, mt, err := i.src.GetManifest(ctx, i.instanceDigest) - if err != nil { - return nil, "", err - } - - // ImageSource.GetManifest does not do digest verification, but we do; - // this immediately protects also any user of types.Image. 
- if digest, haveDigest := i.expectedManifestDigest(); haveDigest { - matches, err := manifest.MatchesDigest(m, digest) - if err != nil { - return nil, "", errors.Wrap(err, "computing manifest digest") - } - if !matches { - return nil, "", errors.Errorf("Manifest does not match provided manifest digest %s", digest) - } - } - - i.cachedManifest = m - i.cachedManifestMIMEType = mt - } - return i.cachedManifest, i.cachedManifestMIMEType, nil -} - -// expectedManifestDigest returns a the expected value of the manifest digest, and an indicator whether it is known. -// The bool return value seems redundant with digest != ""; it is used explicitly -// to refuse (unexpected) situations when the digest exists but is "". -func (i *UnparsedImage) expectedManifestDigest() (digest.Digest, bool) { - if i.instanceDigest != nil { - return *i.instanceDigest, true - } - ref := i.Reference().DockerReference() - if ref != nil { - if canonical, ok := ref.(reference.Canonical); ok { - return canonical.Digest(), true - } - } - return "", false -} - -// Signatures is like ImageSource.GetSignatures, but the result is cached; it is OK to call this however often you need. -func (i *UnparsedImage) Signatures(ctx context.Context) ([][]byte, error) { - if i.cachedSignatures == nil { - sigs, err := i.src.GetSignatures(ctx, i.instanceDigest) - if err != nil { - return nil, err - } - i.cachedSignatures = sigs - } - return i.cachedSignatures, nil + return image.UnparsedInstance(src, instanceDigest) } diff --git a/vendor/github.com/containers/image/v5/internal/image/docker_list.go b/vendor/github.com/containers/image/v5/internal/image/docker_list.go new file mode 100644 index 000000000..af78ac1df --- /dev/null +++ b/vendor/github.com/containers/image/v5/internal/image/docker_list.go @@ -0,0 +1,34 @@ +package image + +import ( + "context" + + "github.com/containers/image/v5/manifest" + "github.com/containers/image/v5/types" + "github.com/pkg/errors" +) + +func manifestSchema2FromManifestList(ctx context.Context, sys *types.SystemContext, src types.ImageSource, manblob []byte) (genericManifest, error) { + list, err := manifest.Schema2ListFromManifest(manblob) + if err != nil { + return nil, errors.Wrapf(err, "parsing schema2 manifest list") + } + targetManifestDigest, err := list.ChooseInstance(sys) + if err != nil { + return nil, errors.Wrapf(err, "choosing image instance") + } + manblob, mt, err := src.GetManifest(ctx, &targetManifestDigest) + if err != nil { + return nil, errors.Wrapf(err, "fetching target platform image selected from manifest list") + } + + matches, err := manifest.MatchesDigest(manblob, targetManifestDigest) + if err != nil { + return nil, errors.Wrap(err, "computing manifest digest") + } + if !matches { + return nil, errors.Errorf("Image manifest does not match selected manifest digest %s", targetManifestDigest) + } + + return manifestInstanceFromBlob(ctx, sys, src, manblob, mt) +} diff --git a/vendor/github.com/containers/image/v5/internal/image/docker_schema1.go b/vendor/github.com/containers/image/v5/internal/image/docker_schema1.go new file mode 100644 index 000000000..94f776224 --- /dev/null +++ b/vendor/github.com/containers/image/v5/internal/image/docker_schema1.go @@ -0,0 +1,257 @@ +package image + +import ( + "context" + + "github.com/containers/image/v5/docker/reference" + "github.com/containers/image/v5/manifest" + "github.com/containers/image/v5/types" + "github.com/opencontainers/go-digest" + imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" +) + +type 
manifestSchema1 struct { + m *manifest.Schema1 +} + +func manifestSchema1FromManifest(manifestBlob []byte) (genericManifest, error) { + m, err := manifest.Schema1FromManifest(manifestBlob) + if err != nil { + return nil, err + } + return &manifestSchema1{m: m}, nil +} + +// manifestSchema1FromComponents builds a new manifestSchema1 from the supplied data. +func manifestSchema1FromComponents(ref reference.Named, fsLayers []manifest.Schema1FSLayers, history []manifest.Schema1History, architecture string) (genericManifest, error) { + m, err := manifest.Schema1FromComponents(ref, fsLayers, history, architecture) + if err != nil { + return nil, err + } + return &manifestSchema1{m: m}, nil +} + +func (m *manifestSchema1) serialize() ([]byte, error) { + return m.m.Serialize() +} + +func (m *manifestSchema1) manifestMIMEType() string { + return manifest.DockerV2Schema1SignedMediaType +} + +// ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object. +// Note that the config object may not exist in the underlying storage in the return value of UpdatedImage! Use ConfigBlob() below. +func (m *manifestSchema1) ConfigInfo() types.BlobInfo { + return m.m.ConfigInfo() +} + +// ConfigBlob returns the blob described by ConfigInfo, iff ConfigInfo().Digest != ""; nil otherwise. +// The result is cached; it is OK to call this however often you need. +func (m *manifestSchema1) ConfigBlob(context.Context) ([]byte, error) { + return nil, nil +} + +// OCIConfig returns the image configuration as per OCI v1 image-spec. Information about +// layers in the resulting configuration isn't guaranteed to be returned to due how +// old image manifests work (docker v2s1 especially). +func (m *manifestSchema1) OCIConfig(ctx context.Context) (*imgspecv1.Image, error) { + v2s2, err := m.convertToManifestSchema2(ctx, &types.ManifestUpdateOptions{}) + if err != nil { + return nil, err + } + return v2s2.OCIConfig(ctx) +} + +// LayerInfos returns a list of BlobInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers). +// The Digest field is guaranteed to be provided; Size may be -1. +// WARNING: The list may contain duplicates, and they are semantically relevant. +func (m *manifestSchema1) LayerInfos() []types.BlobInfo { + return manifestLayerInfosToBlobInfos(m.m.LayerInfos()) +} + +// EmbeddedDockerReferenceConflicts whether a Docker reference embedded in the manifest, if any, conflicts with destination ref. +// It returns false if the manifest does not embed a Docker reference. +// (This embedding unfortunately happens for Docker schema1, please do not add support for this in any new formats.) +func (m *manifestSchema1) EmbeddedDockerReferenceConflicts(ref reference.Named) bool { + // This is a bit convoluted: We can’t just have a "get embedded docker reference" method + // and have the “does it conflict” logic in the generic copy code, because the manifest does not actually + // embed a full docker/distribution reference, but only the repo name and tag (without the host name). + // So we would have to provide a “return repo without host name, and tag” getter for the generic code, + // which would be very awkward. Instead, we do the matching here in schema1-specific code, and all the + // generic copy code needs to know about is reference.Named and that a manifest may need updating + // for some destinations. 
+ name := reference.Path(ref) + var tag string + if tagged, isTagged := ref.(reference.NamedTagged); isTagged { + tag = tagged.Tag() + } else { + tag = "" + } + return m.m.Name != name || m.m.Tag != tag +} + +// Inspect returns various information for (skopeo inspect) parsed from the manifest and configuration. +func (m *manifestSchema1) Inspect(context.Context) (*types.ImageInspectInfo, error) { + return m.m.Inspect(nil) +} + +// UpdatedImageNeedsLayerDiffIDs returns true iff UpdatedImage(options) needs InformationOnly.LayerDiffIDs. +// This is a horribly specific interface, but computing InformationOnly.LayerDiffIDs can be very expensive to compute +// (most importantly it forces us to download the full layers even if they are already present at the destination). +func (m *manifestSchema1) UpdatedImageNeedsLayerDiffIDs(options types.ManifestUpdateOptions) bool { + return (options.ManifestMIMEType == manifest.DockerV2Schema2MediaType || options.ManifestMIMEType == imgspecv1.MediaTypeImageManifest) +} + +// UpdatedImage returns a types.Image modified according to options. +// This does not change the state of the original Image object. +func (m *manifestSchema1) UpdatedImage(ctx context.Context, options types.ManifestUpdateOptions) (types.Image, error) { + copy := manifestSchema1{m: manifest.Schema1Clone(m.m)} + + // We have 2 MIME types for schema 1, which are basically equivalent (even the un-"Signed" MIME type will be rejected if there isn’t a signature; so, + // handle conversions between them by doing nothing. + if options.ManifestMIMEType != manifest.DockerV2Schema1MediaType && options.ManifestMIMEType != manifest.DockerV2Schema1SignedMediaType { + converted, err := convertManifestIfRequiredWithUpdate(ctx, options, map[string]manifestConvertFn{ + imgspecv1.MediaTypeImageManifest: copy.convertToManifestOCI1, + manifest.DockerV2Schema2MediaType: copy.convertToManifestSchema2Generic, + }) + if err != nil { + return nil, err + } + + if converted != nil { + return converted, nil + } + } + + // No conversion required, update manifest + if options.LayerInfos != nil { + if err := copy.m.UpdateLayerInfos(options.LayerInfos); err != nil { + return nil, err + } + } + if options.EmbeddedDockerReference != nil { + copy.m.Name = reference.Path(options.EmbeddedDockerReference) + if tagged, isTagged := options.EmbeddedDockerReference.(reference.NamedTagged); isTagged { + copy.m.Tag = tagged.Tag() + } else { + copy.m.Tag = "" + } + } + + return memoryImageFromManifest(©), nil +} + +// convertToManifestSchema2Generic returns a genericManifest implementation converted to manifest.DockerV2Schema2MediaType. +// It may use options.InformationOnly and also adjust *options to be appropriate for editing the returned +// value. +// This does not change the state of the original manifestSchema1 object. +// +// We need this function just because a function returning an implementation of the genericManifest +// interface is not automatically assignable to a function type returning the genericManifest interface +func (m *manifestSchema1) convertToManifestSchema2Generic(ctx context.Context, options *types.ManifestUpdateOptions) (genericManifest, error) { + return m.convertToManifestSchema2(ctx, options) +} + +// convertToManifestSchema2 returns a genericManifest implementation converted to manifest.DockerV2Schema2MediaType. +// It may use options.InformationOnly and also adjust *options to be appropriate for editing the returned +// value. +// This does not change the state of the original manifestSchema1 object. 
+// +// Based on github.com/docker/docker/distribution/pull_v2.go +func (m *manifestSchema1) convertToManifestSchema2(_ context.Context, options *types.ManifestUpdateOptions) (*manifestSchema2, error) { + uploadedLayerInfos := options.InformationOnly.LayerInfos + layerDiffIDs := options.InformationOnly.LayerDiffIDs + + if len(m.m.ExtractedV1Compatibility) == 0 { + // What would this even mean?! Anyhow, the rest of the code depends on FSLayers[0] and ExtractedV1Compatibility[0] existing. + return nil, errors.Errorf("Cannot convert an image with 0 history entries to %s", manifest.DockerV2Schema2MediaType) + } + if len(m.m.ExtractedV1Compatibility) != len(m.m.FSLayers) { + return nil, errors.Errorf("Inconsistent schema 1 manifest: %d history entries, %d fsLayers entries", len(m.m.ExtractedV1Compatibility), len(m.m.FSLayers)) + } + if uploadedLayerInfos != nil && len(uploadedLayerInfos) != len(m.m.FSLayers) { + return nil, errors.Errorf("Internal error: uploaded %d blobs, but schema1 manifest has %d fsLayers", len(uploadedLayerInfos), len(m.m.FSLayers)) + } + if layerDiffIDs != nil && len(layerDiffIDs) != len(m.m.FSLayers) { + return nil, errors.Errorf("Internal error: collected %d DiffID values, but schema1 manifest has %d fsLayers", len(layerDiffIDs), len(m.m.FSLayers)) + } + + var convertedLayerUpdates []types.BlobInfo // Only used if options.LayerInfos != nil + if options.LayerInfos != nil { + if len(options.LayerInfos) != len(m.m.FSLayers) { + return nil, errors.Errorf("Error converting image: layer edits for %d layers vs %d existing layers", + len(options.LayerInfos), len(m.m.FSLayers)) + } + convertedLayerUpdates = []types.BlobInfo{} + } + + // Build a list of the diffIDs for the non-empty layers. + diffIDs := []digest.Digest{} + var layers []manifest.Schema2Descriptor + for v1Index := len(m.m.ExtractedV1Compatibility) - 1; v1Index >= 0; v1Index-- { + v2Index := (len(m.m.ExtractedV1Compatibility) - 1) - v1Index + + if !m.m.ExtractedV1Compatibility[v1Index].ThrowAway { + var size int64 + if uploadedLayerInfos != nil { + size = uploadedLayerInfos[v2Index].Size + } + var d digest.Digest + if layerDiffIDs != nil { + d = layerDiffIDs[v2Index] + } + layers = append(layers, manifest.Schema2Descriptor{ + MediaType: "application/vnd.docker.image.rootfs.diff.tar.gzip", + Size: size, + Digest: m.m.FSLayers[v1Index].BlobSum, + }) + if options.LayerInfos != nil { + convertedLayerUpdates = append(convertedLayerUpdates, options.LayerInfos[v2Index]) + } + diffIDs = append(diffIDs, d) + } + } + configJSON, err := m.m.ToSchema2Config(diffIDs) + if err != nil { + return nil, err + } + configDescriptor := manifest.Schema2Descriptor{ + MediaType: "application/vnd.docker.container.image.v1+json", + Size: int64(len(configJSON)), + Digest: digest.FromBytes(configJSON), + } + + if options.LayerInfos != nil { + options.LayerInfos = convertedLayerUpdates + } + return manifestSchema2FromComponents(configDescriptor, nil, configJSON, layers), nil +} + +// convertToManifestOCI1 returns a genericManifest implementation converted to imgspecv1.MediaTypeImageManifest. +// It may use options.InformationOnly and also adjust *options to be appropriate for editing the returned +// value. +// This does not change the state of the original manifestSchema1 object. 
+func (m *manifestSchema1) convertToManifestOCI1(ctx context.Context, options *types.ManifestUpdateOptions) (genericManifest, error) { + // We can't directly convert to OCI, but we can transitively convert via a Docker V2.2 Distribution manifest + m2, err := m.convertToManifestSchema2(ctx, options) + if err != nil { + return nil, err + } + + return m2.convertToManifestOCI1(ctx, options) +} + +// SupportsEncryption returns if encryption is supported for the manifest type +func (m *manifestSchema1) SupportsEncryption(context.Context) bool { + return false +} + +// CanChangeLayerCompression returns true if we can compress/decompress layers with mimeType in the current image +// (and the code can handle that). +// NOTE: Even if this returns true, the relevant format might not accept all compression algorithms; the set of accepted +// algorithms depends not on the current format, but possibly on the target of a conversion (if UpdatedImage converts +// to a different manifest format). +func (m *manifestSchema1) CanChangeLayerCompression(mimeType string) bool { + return true // There are no MIME types in the manifest, so we must assume a valid image. +} diff --git a/vendor/github.com/containers/image/v5/internal/image/docker_schema2.go b/vendor/github.com/containers/image/v5/internal/image/docker_schema2.go new file mode 100644 index 000000000..7dfd3c5d8 --- /dev/null +++ b/vendor/github.com/containers/image/v5/internal/image/docker_schema2.go @@ -0,0 +1,413 @@ +package image + +import ( + "bytes" + "context" + "crypto/sha256" + "encoding/hex" + "encoding/json" + "fmt" + "strings" + + "github.com/containers/image/v5/docker/reference" + "github.com/containers/image/v5/internal/iolimits" + "github.com/containers/image/v5/manifest" + "github.com/containers/image/v5/pkg/blobinfocache/none" + "github.com/containers/image/v5/types" + "github.com/opencontainers/go-digest" + imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +// GzippedEmptyLayer is a gzip-compressed version of an empty tar file (1024 NULL bytes) +// This comes from github.com/docker/distribution/manifest/schema1/config_builder.go; there is +// a non-zero embedded timestamp; we could zero that, but that would just waste storage space +// in registries, so let’s use the same values. +// +// This is publicly visible as c/image/image.GzippedEmptyLayer. +var GzippedEmptyLayer = []byte{ + 31, 139, 8, 0, 0, 9, 110, 136, 0, 255, 98, 24, 5, 163, 96, 20, 140, 88, + 0, 8, 0, 0, 255, 255, 46, 175, 181, 239, 0, 4, 0, 0, +} + +// GzippedEmptyLayerDigest is a digest of GzippedEmptyLayer +// +// This is publicly visible as c/image/image.GzippedEmptyLayerDigest. +const GzippedEmptyLayerDigest = digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4") + +type manifestSchema2 struct { + src types.ImageSource // May be nil if configBlob is not nil + configBlob []byte // If set, corresponds to contents of ConfigDescriptor. 
+ m *manifest.Schema2 +} + +func manifestSchema2FromManifest(src types.ImageSource, manifestBlob []byte) (genericManifest, error) { + m, err := manifest.Schema2FromManifest(manifestBlob) + if err != nil { + return nil, err + } + return &manifestSchema2{ + src: src, + m: m, + }, nil +} + +// manifestSchema2FromComponents builds a new manifestSchema2 from the supplied data: +func manifestSchema2FromComponents(config manifest.Schema2Descriptor, src types.ImageSource, configBlob []byte, layers []manifest.Schema2Descriptor) *manifestSchema2 { + return &manifestSchema2{ + src: src, + configBlob: configBlob, + m: manifest.Schema2FromComponents(config, layers), + } +} + +func (m *manifestSchema2) serialize() ([]byte, error) { + return m.m.Serialize() +} + +func (m *manifestSchema2) manifestMIMEType() string { + return m.m.MediaType +} + +// ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object. +// Note that the config object may not exist in the underlying storage in the return value of UpdatedImage! Use ConfigBlob() below. +func (m *manifestSchema2) ConfigInfo() types.BlobInfo { + return m.m.ConfigInfo() +} + +// OCIConfig returns the image configuration as per OCI v1 image-spec. Information about +// layers in the resulting configuration isn't guaranteed to be returned to due how +// old image manifests work (docker v2s1 especially). +func (m *manifestSchema2) OCIConfig(ctx context.Context) (*imgspecv1.Image, error) { + configBlob, err := m.ConfigBlob(ctx) + if err != nil { + return nil, err + } + // docker v2s2 and OCI v1 are mostly compatible but v2s2 contains more fields + // than OCI v1. This unmarshal makes sure we drop docker v2s2 + // fields that aren't needed in OCI v1. + configOCI := &imgspecv1.Image{} + if err := json.Unmarshal(configBlob, configOCI); err != nil { + return nil, err + } + return configOCI, nil +} + +// ConfigBlob returns the blob described by ConfigInfo, iff ConfigInfo().Digest != ""; nil otherwise. +// The result is cached; it is OK to call this however often you need. +func (m *manifestSchema2) ConfigBlob(ctx context.Context) ([]byte, error) { + if m.configBlob == nil { + if m.src == nil { + return nil, errors.Errorf("Internal error: neither src nor configBlob set in manifestSchema2") + } + stream, _, err := m.src.GetBlob(ctx, manifest.BlobInfoFromSchema2Descriptor(m.m.ConfigDescriptor), none.NoCache) + if err != nil { + return nil, err + } + defer stream.Close() + blob, err := iolimits.ReadAtMost(stream, iolimits.MaxConfigBodySize) + if err != nil { + return nil, err + } + computedDigest := digest.FromBytes(blob) + if computedDigest != m.m.ConfigDescriptor.Digest { + return nil, errors.Errorf("Download config.json digest %s does not match expected %s", computedDigest, m.m.ConfigDescriptor.Digest) + } + m.configBlob = blob + } + return m.configBlob, nil +} + +// LayerInfos returns a list of BlobInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers). +// The Digest field is guaranteed to be provided; Size may be -1. +// WARNING: The list may contain duplicates, and they are semantically relevant. +func (m *manifestSchema2) LayerInfos() []types.BlobInfo { + return manifestLayerInfosToBlobInfos(m.m.LayerInfos()) +} + +// EmbeddedDockerReferenceConflicts whether a Docker reference embedded in the manifest, if any, conflicts with destination ref. +// It returns false if the manifest does not embed a Docker reference. 
+// (This embedding unfortunately happens for Docker schema1, please do not add support for this in any new formats.) +func (m *manifestSchema2) EmbeddedDockerReferenceConflicts(ref reference.Named) bool { + return false +} + +// Inspect returns various information for (skopeo inspect) parsed from the manifest and configuration. +func (m *manifestSchema2) Inspect(ctx context.Context) (*types.ImageInspectInfo, error) { + getter := func(info types.BlobInfo) ([]byte, error) { + if info.Digest != m.ConfigInfo().Digest { + // Shouldn't ever happen + return nil, errors.New("asked for a different config blob") + } + config, err := m.ConfigBlob(ctx) + if err != nil { + return nil, err + } + return config, nil + } + return m.m.Inspect(getter) +} + +// UpdatedImageNeedsLayerDiffIDs returns true iff UpdatedImage(options) needs InformationOnly.LayerDiffIDs. +// This is a horribly specific interface, but computing InformationOnly.LayerDiffIDs can be very expensive to compute +// (most importantly it forces us to download the full layers even if they are already present at the destination). +func (m *manifestSchema2) UpdatedImageNeedsLayerDiffIDs(options types.ManifestUpdateOptions) bool { + return false +} + +// UpdatedImage returns a types.Image modified according to options. +// This does not change the state of the original Image object. +// The returned error will be a manifest.ManifestLayerCompressionIncompatibilityError +// if the CompressionOperation and CompressionAlgorithm specified in one or more +// options.LayerInfos items is anything other than gzip. +func (m *manifestSchema2) UpdatedImage(ctx context.Context, options types.ManifestUpdateOptions) (types.Image, error) { + copy := manifestSchema2{ // NOTE: This is not a deep copy, it still shares slices etc. + src: m.src, + configBlob: m.configBlob, + m: manifest.Schema2Clone(m.m), + } + + converted, err := convertManifestIfRequiredWithUpdate(ctx, options, map[string]manifestConvertFn{ + manifest.DockerV2Schema1MediaType: copy.convertToManifestSchema1, + manifest.DockerV2Schema1SignedMediaType: copy.convertToManifestSchema1, + imgspecv1.MediaTypeImageManifest: copy.convertToManifestOCI1, + }) + if err != nil { + return nil, err + } + + if converted != nil { + return converted, nil + } + + // No conversion required, update manifest + if options.LayerInfos != nil { + if err := copy.m.UpdateLayerInfos(options.LayerInfos); err != nil { + return nil, err + } + } + // Ignore options.EmbeddedDockerReference: it may be set when converting from schema1 to schema2, but we really don't care. + + return memoryImageFromManifest(©), nil +} + +func oci1DescriptorFromSchema2Descriptor(d manifest.Schema2Descriptor) imgspecv1.Descriptor { + return imgspecv1.Descriptor{ + MediaType: d.MediaType, + Size: d.Size, + Digest: d.Digest, + URLs: d.URLs, + } +} + +// convertToManifestOCI1 returns a genericManifest implementation converted to imgspecv1.MediaTypeImageManifest. +// It may use options.InformationOnly and also adjust *options to be appropriate for editing the returned +// value. +// This does not change the state of the original manifestSchema2 object. 
+func (m *manifestSchema2) convertToManifestOCI1(ctx context.Context, _ *types.ManifestUpdateOptions) (genericManifest, error) { + configOCI, err := m.OCIConfig(ctx) + if err != nil { + return nil, err + } + configOCIBytes, err := json.Marshal(configOCI) + if err != nil { + return nil, err + } + + config := imgspecv1.Descriptor{ + MediaType: imgspecv1.MediaTypeImageConfig, + Size: int64(len(configOCIBytes)), + Digest: digest.FromBytes(configOCIBytes), + } + + layers := make([]imgspecv1.Descriptor, len(m.m.LayersDescriptors)) + for idx := range layers { + layers[idx] = oci1DescriptorFromSchema2Descriptor(m.m.LayersDescriptors[idx]) + switch m.m.LayersDescriptors[idx].MediaType { + case manifest.DockerV2Schema2ForeignLayerMediaType: + layers[idx].MediaType = imgspecv1.MediaTypeImageLayerNonDistributable + case manifest.DockerV2Schema2ForeignLayerMediaTypeGzip: + layers[idx].MediaType = imgspecv1.MediaTypeImageLayerNonDistributableGzip + case manifest.DockerV2SchemaLayerMediaTypeUncompressed: + layers[idx].MediaType = imgspecv1.MediaTypeImageLayer + case manifest.DockerV2Schema2LayerMediaType: + layers[idx].MediaType = imgspecv1.MediaTypeImageLayerGzip + default: + return nil, fmt.Errorf("Unknown media type during manifest conversion: %q", m.m.LayersDescriptors[idx].MediaType) + } + } + + return manifestOCI1FromComponents(config, m.src, configOCIBytes, layers), nil +} + +// convertToManifestSchema1 returns a genericManifest implementation converted to manifest.DockerV2Schema1{Signed,}MediaType. +// It may use options.InformationOnly and also adjust *options to be appropriate for editing the returned +// value. +// This does not change the state of the original manifestSchema2 object. +// +// Based on docker/distribution/manifest/schema1/config_builder.go +func (m *manifestSchema2) convertToManifestSchema1(ctx context.Context, options *types.ManifestUpdateOptions) (genericManifest, error) { + dest := options.InformationOnly.Destination + + var convertedLayerUpdates []types.BlobInfo // Only used if options.LayerInfos != nil + if options.LayerInfos != nil { + if len(options.LayerInfos) != len(m.m.LayersDescriptors) { + return nil, fmt.Errorf("Error converting image: layer edits for %d layers vs %d existing layers", + len(options.LayerInfos), len(m.m.LayersDescriptors)) + } + convertedLayerUpdates = []types.BlobInfo{} + } + + configBytes, err := m.ConfigBlob(ctx) + if err != nil { + return nil, err + } + imageConfig := &manifest.Schema2Image{} + if err := json.Unmarshal(configBytes, imageConfig); err != nil { + return nil, err + } + + // Build fsLayers and History, discarding all configs. We will patch the top-level config in later. + fsLayers := make([]manifest.Schema1FSLayers, len(imageConfig.History)) + history := make([]manifest.Schema1History, len(imageConfig.History)) + nonemptyLayerIndex := 0 + var parentV1ID string // Set in the loop + v1ID := "" + haveGzippedEmptyLayer := false + if len(imageConfig.History) == 0 { + // What would this even mean?! Anyhow, the rest of the code depends on fsLayers[0] and history[0] existing. 
+ return nil, errors.Errorf("Cannot convert an image with 0 history entries to %s", manifest.DockerV2Schema1SignedMediaType) + } + for v2Index, historyEntry := range imageConfig.History { + parentV1ID = v1ID + v1Index := len(imageConfig.History) - 1 - v2Index + + var blobDigest digest.Digest + if historyEntry.EmptyLayer { + emptyLayerBlobInfo := types.BlobInfo{Digest: GzippedEmptyLayerDigest, Size: int64(len(GzippedEmptyLayer))} + + if !haveGzippedEmptyLayer { + logrus.Debugf("Uploading empty layer during conversion to schema 1") + // Ideally we should update the relevant BlobInfoCache about this layer, but that would require passing it down here, + // and anyway this blob is so small that it’s easier to just copy it than to worry about figuring out another location where to get it. + info, err := dest.PutBlob(ctx, bytes.NewReader(GzippedEmptyLayer), emptyLayerBlobInfo, none.NoCache, false) + if err != nil { + return nil, errors.Wrap(err, "uploading empty layer") + } + if info.Digest != emptyLayerBlobInfo.Digest { + return nil, errors.Errorf("Internal error: Uploaded empty layer has digest %#v instead of %s", info.Digest, emptyLayerBlobInfo.Digest) + } + haveGzippedEmptyLayer = true + } + if options.LayerInfos != nil { + convertedLayerUpdates = append(convertedLayerUpdates, emptyLayerBlobInfo) + } + blobDigest = emptyLayerBlobInfo.Digest + } else { + if nonemptyLayerIndex >= len(m.m.LayersDescriptors) { + return nil, errors.Errorf("Invalid image configuration, needs more than the %d distributed layers", len(m.m.LayersDescriptors)) + } + if options.LayerInfos != nil { + convertedLayerUpdates = append(convertedLayerUpdates, options.LayerInfos[nonemptyLayerIndex]) + } + blobDigest = m.m.LayersDescriptors[nonemptyLayerIndex].Digest + nonemptyLayerIndex++ + } + + // AFAICT pull ignores these ID values, at least nowadays, so we could use anything unique, including a simple counter. Use what Docker uses for cargo-cult consistency. + v, err := v1IDFromBlobDigestAndComponents(blobDigest, parentV1ID) + if err != nil { + return nil, err + } + v1ID = v + + fakeImage := manifest.Schema1V1Compatibility{ + ID: v1ID, + Parent: parentV1ID, + Comment: historyEntry.Comment, + Created: historyEntry.Created, + Author: historyEntry.Author, + ThrowAway: historyEntry.EmptyLayer, + } + fakeImage.ContainerConfig.Cmd = []string{historyEntry.CreatedBy} + v1CompatibilityBytes, err := json.Marshal(&fakeImage) + if err != nil { + return nil, errors.Errorf("Internal error: Error creating v1compatibility for %#v", fakeImage) + } + + fsLayers[v1Index] = manifest.Schema1FSLayers{BlobSum: blobDigest} + history[v1Index] = manifest.Schema1History{V1Compatibility: string(v1CompatibilityBytes)} + // Note that parentV1ID of the top layer is preserved when exiting this loop + } + + // Now patch in real configuration for the top layer (v1Index == 0) + v1ID, err = v1IDFromBlobDigestAndComponents(fsLayers[0].BlobSum, parentV1ID, string(configBytes)) // See above WRT v1ID value generation and cargo-cult consistency. 
+ if err != nil { + return nil, err + } + v1Config, err := v1ConfigFromConfigJSON(configBytes, v1ID, parentV1ID, imageConfig.History[len(imageConfig.History)-1].EmptyLayer) + if err != nil { + return nil, err + } + history[0].V1Compatibility = string(v1Config) + + if options.LayerInfos != nil { + options.LayerInfos = convertedLayerUpdates + } + m1, err := manifestSchema1FromComponents(dest.Reference().DockerReference(), fsLayers, history, imageConfig.Architecture) + if err != nil { + return nil, err // This should never happen, we should have created all the components correctly. + } + return m1, nil +} + +func v1IDFromBlobDigestAndComponents(blobDigest digest.Digest, others ...string) (string, error) { + if err := blobDigest.Validate(); err != nil { + return "", err + } + parts := append([]string{blobDigest.Hex()}, others...) + v1IDHash := sha256.Sum256([]byte(strings.Join(parts, " "))) + return hex.EncodeToString(v1IDHash[:]), nil +} + +func v1ConfigFromConfigJSON(configJSON []byte, v1ID, parentV1ID string, throwaway bool) ([]byte, error) { + // Preserve everything we don't specifically know about. + // (This must be a *json.RawMessage, even though *[]byte is fairly redundant, because only *RawMessage implements json.Marshaler.) + rawContents := map[string]*json.RawMessage{} + if err := json.Unmarshal(configJSON, &rawContents); err != nil { // We have already unmarshaled it before, using a more detailed schema?! + return nil, err + } + delete(rawContents, "rootfs") + delete(rawContents, "history") + + updates := map[string]interface{}{"id": v1ID} + if parentV1ID != "" { + updates["parent"] = parentV1ID + } + if throwaway { + updates["throwaway"] = throwaway + } + for field, value := range updates { + encoded, err := json.Marshal(value) + if err != nil { + return nil, err + } + rawContents[field] = (*json.RawMessage)(&encoded) + } + return json.Marshal(rawContents) +} + +// SupportsEncryption returns if encryption is supported for the manifest type +func (m *manifestSchema2) SupportsEncryption(context.Context) bool { + return false +} + +// CanChangeLayerCompression returns true if we can compress/decompress layers with mimeType in the current image +// (and the code can handle that). +// NOTE: Even if this returns true, the relevant format might not accept all compression algorithms; the set of accepted +// algorithms depends not on the current format, but possibly on the target of a conversion (if UpdatedImage converts +// to a different manifest format). +func (m *manifestSchema2) CanChangeLayerCompression(mimeType string) bool { + return m.m.CanChangeLayerCompression(mimeType) +} diff --git a/vendor/github.com/containers/image/v5/internal/image/manifest.go b/vendor/github.com/containers/image/v5/internal/image/manifest.go new file mode 100644 index 000000000..6b5f34538 --- /dev/null +++ b/vendor/github.com/containers/image/v5/internal/image/manifest.go @@ -0,0 +1,122 @@ +package image + +import ( + "context" + "fmt" + + "github.com/containers/image/v5/docker/reference" + "github.com/containers/image/v5/manifest" + "github.com/containers/image/v5/types" + imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" +) + +// genericManifest is an interface for parsing, modifying image manifests and related data. +// The public methods are related to types.Image so that embedding a genericManifest implements most of it, +// but there are also public methods that are only visible by packages that can import c/image/internal/image. 
+type genericManifest interface { + serialize() ([]byte, error) + manifestMIMEType() string + // ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object. + // Note that the config object may not exist in the underlying storage in the return value of UpdatedImage! Use ConfigBlob() below. + ConfigInfo() types.BlobInfo + // ConfigBlob returns the blob described by ConfigInfo, iff ConfigInfo().Digest != ""; nil otherwise. + // The result is cached; it is OK to call this however often you need. + ConfigBlob(context.Context) ([]byte, error) + // OCIConfig returns the image configuration as per OCI v1 image-spec. Information about + // layers in the resulting configuration isn't guaranteed to be returned to due how + // old image manifests work (docker v2s1 especially). + OCIConfig(context.Context) (*imgspecv1.Image, error) + // LayerInfos returns a list of BlobInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers). + // The Digest field is guaranteed to be provided; Size may be -1. + // WARNING: The list may contain duplicates, and they are semantically relevant. + LayerInfos() []types.BlobInfo + // EmbeddedDockerReferenceConflicts whether a Docker reference embedded in the manifest, if any, conflicts with destination ref. + // It returns false if the manifest does not embed a Docker reference. + // (This embedding unfortunately happens for Docker schema1, please do not add support for this in any new formats.) + EmbeddedDockerReferenceConflicts(ref reference.Named) bool + // Inspect returns various information for (skopeo inspect) parsed from the manifest and configuration. + Inspect(context.Context) (*types.ImageInspectInfo, error) + // UpdatedImageNeedsLayerDiffIDs returns true iff UpdatedImage(options) needs InformationOnly.LayerDiffIDs. + // This is a horribly specific interface, but computing InformationOnly.LayerDiffIDs can be very expensive to compute + // (most importantly it forces us to download the full layers even if they are already present at the destination). + UpdatedImageNeedsLayerDiffIDs(options types.ManifestUpdateOptions) bool + // UpdatedImage returns a types.Image modified according to options. + // This does not change the state of the original Image object. + UpdatedImage(ctx context.Context, options types.ManifestUpdateOptions) (types.Image, error) + // SupportsEncryption returns if encryption is supported for the manifest type + // + // Deprecated: Initially used to determine if a manifest can be copied from a source manifest type since + // the process of updating a manifest between different manifest types was to update then convert. + // This resulted in some fields in the update being lost. This has been fixed by: https://github.com/containers/image/pull/836 + SupportsEncryption(ctx context.Context) bool + + // The following methods are not a part of types.Image: + // === + + // CanChangeLayerCompression returns true if we can compress/decompress layers with mimeType in the current image + // (and the code can handle that). + // NOTE: Even if this returns true, the relevant format might not accept all compression algorithms; the set of accepted + // algorithms depends not on the current format, but possibly on the target of a conversion (if UpdatedImage converts + // to a different manifest format). 
+ CanChangeLayerCompression(mimeType string) bool +} + +// manifestInstanceFromBlob returns a genericManifest implementation for (manblob, mt) in src. +// If manblob is a manifest list, it implicitly chooses an appropriate image from the list. +func manifestInstanceFromBlob(ctx context.Context, sys *types.SystemContext, src types.ImageSource, manblob []byte, mt string) (genericManifest, error) { + switch manifest.NormalizedMIMEType(mt) { + case manifest.DockerV2Schema1MediaType, manifest.DockerV2Schema1SignedMediaType: + return manifestSchema1FromManifest(manblob) + case imgspecv1.MediaTypeImageManifest: + return manifestOCI1FromManifest(src, manblob) + case manifest.DockerV2Schema2MediaType: + return manifestSchema2FromManifest(src, manblob) + case manifest.DockerV2ListMediaType: + return manifestSchema2FromManifestList(ctx, sys, src, manblob) + case imgspecv1.MediaTypeImageIndex: + return manifestOCI1FromImageIndex(ctx, sys, src, manblob) + default: // Note that this may not be reachable, manifest.NormalizedMIMEType has a default for unknown values. + return nil, fmt.Errorf("Unimplemented manifest MIME type %s", mt) + } +} + +// manifestLayerInfosToBlobInfos extracts a []types.BlobInfo from a []manifest.LayerInfo. +func manifestLayerInfosToBlobInfos(layers []manifest.LayerInfo) []types.BlobInfo { + blobs := make([]types.BlobInfo, len(layers)) + for i, layer := range layers { + blobs[i] = layer.BlobInfo + } + return blobs +} + +// manifestConvertFn (a method of genericManifest object) returns a genericManifest implementation +// converted to a specific manifest MIME type. +// It may use options.InformationOnly and also adjust *options to be appropriate for editing the returned +// value. +// This does not change the state of the original genericManifest object. +type manifestConvertFn func(ctx context.Context, options *types.ManifestUpdateOptions) (genericManifest, error) + +// convertManifestIfRequiredWithUpdate will run conversion functions of a manifest if +// required and re-apply the options to the converted type. +// It returns (nil, nil) if no conversion was requested. +func convertManifestIfRequiredWithUpdate(ctx context.Context, options types.ManifestUpdateOptions, converters map[string]manifestConvertFn) (types.Image, error) { + if options.ManifestMIMEType == "" { + return nil, nil + } + + converter, ok := converters[options.ManifestMIMEType] + if !ok { + return nil, errors.Errorf("Unsupported conversion type: %v", options.ManifestMIMEType) + } + + optionsCopy := options + convertedManifest, err := converter(ctx, &optionsCopy) + if err != nil { + return nil, err + } + convertedImage := memoryImageFromManifest(convertedManifest) + + optionsCopy.ManifestMIMEType = "" + return convertedImage.UpdatedImage(ctx, optionsCopy) +} diff --git a/vendor/github.com/containers/image/v5/internal/image/memory.go b/vendor/github.com/containers/image/v5/internal/image/memory.go new file mode 100644 index 000000000..4c96b37d8 --- /dev/null +++ b/vendor/github.com/containers/image/v5/internal/image/memory.go @@ -0,0 +1,64 @@ +package image + +import ( + "context" + + "github.com/containers/image/v5/types" + "github.com/pkg/errors" +) + +// memoryImage is a mostly-implementation of types.Image assembled from data +// created in memory, used primarily as a return value of types.Image.UpdatedImage +// as a way to carry various structured information in a type-safe and easy-to-use way. 
+// Note that this _only_ carries the immediate metadata; it is _not_ a stand-alone +// collection of all related information, e.g. there is no way to get layer blobs +// from a memoryImage. +type memoryImage struct { + genericManifest + serializedManifest []byte // A private cache for Manifest() +} + +func memoryImageFromManifest(m genericManifest) types.Image { + return &memoryImage{ + genericManifest: m, + serializedManifest: nil, + } +} + +// Reference returns the reference used to set up this source, _as specified by the user_ +// (not as the image itself, or its underlying storage, claims). This can be used e.g. to determine which public keys are trusted for this image. +func (i *memoryImage) Reference() types.ImageReference { + // It would really be inappropriate to return the ImageReference of the image this was based on. + return nil +} + +// Size returns the size of the image as stored, if known, or -1 if not. +func (i *memoryImage) Size() (int64, error) { + return -1, nil +} + +// Manifest is like ImageSource.GetManifest, but the result is cached; it is OK to call this however often you need. +func (i *memoryImage) Manifest(ctx context.Context) ([]byte, string, error) { + if i.serializedManifest == nil { + m, err := i.genericManifest.serialize() + if err != nil { + return nil, "", err + } + i.serializedManifest = m + } + return i.serializedManifest, i.genericManifest.manifestMIMEType(), nil +} + +// Signatures is like ImageSource.GetSignatures, but the result is cached; it is OK to call this however often you need. +func (i *memoryImage) Signatures(ctx context.Context) ([][]byte, error) { + // Modifying an image invalidates signatures; a caller asking the updated image for signatures + // is probably confused. + return nil, errors.New("Internal error: Image.Signatures() is not supported for images modified in memory") +} + +// LayerInfosForCopy returns an updated set of layer blob information which may not match the manifest. +// The Digest field is guaranteed to be provided; Size may be -1. +// WARNING: The list may contain duplicates, and they are semantically relevant. +func (i *memoryImage) LayerInfosForCopy(ctx context.Context) ([]types.BlobInfo, error) { + return nil, nil +} diff --git a/vendor/github.com/containers/image/v5/internal/image/oci.go b/vendor/github.com/containers/image/v5/internal/image/oci.go new file mode 100644 index 000000000..af1a90e82 --- /dev/null +++ b/vendor/github.com/containers/image/v5/internal/image/oci.go @@ -0,0 +1,271 @@ +package image + +import ( + "context" + "encoding/json" + "fmt" + + "github.com/containers/image/v5/docker/reference" + "github.com/containers/image/v5/internal/iolimits" + internalManifest "github.com/containers/image/v5/internal/manifest" + "github.com/containers/image/v5/manifest" + "github.com/containers/image/v5/pkg/blobinfocache/none" + "github.com/containers/image/v5/types" + "github.com/opencontainers/go-digest" + imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" +) + +type manifestOCI1 struct { + src types.ImageSource // May be nil if configBlob is not nil + configBlob []byte // If set, corresponds to contents of m.Config. 
+ m *manifest.OCI1 +} + +func manifestOCI1FromManifest(src types.ImageSource, manifestBlob []byte) (genericManifest, error) { + m, err := manifest.OCI1FromManifest(manifestBlob) + if err != nil { + return nil, err + } + return &manifestOCI1{ + src: src, + m: m, + }, nil +} + +// manifestOCI1FromComponents builds a new manifestOCI1 from the supplied data: +func manifestOCI1FromComponents(config imgspecv1.Descriptor, src types.ImageSource, configBlob []byte, layers []imgspecv1.Descriptor) genericManifest { + return &manifestOCI1{ + src: src, + configBlob: configBlob, + m: manifest.OCI1FromComponents(config, layers), + } +} + +func (m *manifestOCI1) serialize() ([]byte, error) { + return m.m.Serialize() +} + +func (m *manifestOCI1) manifestMIMEType() string { + return imgspecv1.MediaTypeImageManifest +} + +// ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object. +// Note that the config object may not exist in the underlying storage in the return value of UpdatedImage! Use ConfigBlob() below. +func (m *manifestOCI1) ConfigInfo() types.BlobInfo { + return m.m.ConfigInfo() +} + +// ConfigBlob returns the blob described by ConfigInfo, iff ConfigInfo().Digest != ""; nil otherwise. +// The result is cached; it is OK to call this however often you need. +func (m *manifestOCI1) ConfigBlob(ctx context.Context) ([]byte, error) { + if m.configBlob == nil { + if m.src == nil { + return nil, errors.Errorf("Internal error: neither src nor configBlob set in manifestOCI1") + } + stream, _, err := m.src.GetBlob(ctx, manifest.BlobInfoFromOCI1Descriptor(m.m.Config), none.NoCache) + if err != nil { + return nil, err + } + defer stream.Close() + blob, err := iolimits.ReadAtMost(stream, iolimits.MaxConfigBodySize) + if err != nil { + return nil, err + } + computedDigest := digest.FromBytes(blob) + if computedDigest != m.m.Config.Digest { + return nil, errors.Errorf("Download config.json digest %s does not match expected %s", computedDigest, m.m.Config.Digest) + } + m.configBlob = blob + } + return m.configBlob, nil +} + +// OCIConfig returns the image configuration as per OCI v1 image-spec. Information about +// layers in the resulting configuration isn't guaranteed to be returned to due how +// old image manifests work (docker v2s1 especially). +func (m *manifestOCI1) OCIConfig(ctx context.Context) (*imgspecv1.Image, error) { + if m.m.Config.MediaType != imgspecv1.MediaTypeImageConfig { + return nil, internalManifest.NewNonImageArtifactError(m.m.Config.MediaType) + } + + cb, err := m.ConfigBlob(ctx) + if err != nil { + return nil, err + } + configOCI := &imgspecv1.Image{} + if err := json.Unmarshal(cb, configOCI); err != nil { + return nil, err + } + return configOCI, nil +} + +// LayerInfos returns a list of BlobInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers). +// The Digest field is guaranteed to be provided; Size may be -1. +// WARNING: The list may contain duplicates, and they are semantically relevant. +func (m *manifestOCI1) LayerInfos() []types.BlobInfo { + return manifestLayerInfosToBlobInfos(m.m.LayerInfos()) +} + +// EmbeddedDockerReferenceConflicts whether a Docker reference embedded in the manifest, if any, conflicts with destination ref. +// It returns false if the manifest does not embed a Docker reference. +// (This embedding unfortunately happens for Docker schema1, please do not add support for this in any new formats.) 
+func (m *manifestOCI1) EmbeddedDockerReferenceConflicts(ref reference.Named) bool { + return false +} + +// Inspect returns various information for (skopeo inspect) parsed from the manifest and configuration. +func (m *manifestOCI1) Inspect(ctx context.Context) (*types.ImageInspectInfo, error) { + getter := func(info types.BlobInfo) ([]byte, error) { + if info.Digest != m.ConfigInfo().Digest { + // Shouldn't ever happen + return nil, errors.New("asked for a different config blob") + } + config, err := m.ConfigBlob(ctx) + if err != nil { + return nil, err + } + return config, nil + } + return m.m.Inspect(getter) +} + +// UpdatedImageNeedsLayerDiffIDs returns true iff UpdatedImage(options) needs InformationOnly.LayerDiffIDs. +// This is a horribly specific interface, but computing InformationOnly.LayerDiffIDs can be very expensive to compute +// (most importantly it forces us to download the full layers even if they are already present at the destination). +func (m *manifestOCI1) UpdatedImageNeedsLayerDiffIDs(options types.ManifestUpdateOptions) bool { + return false +} + +// UpdatedImage returns a types.Image modified according to options. +// This does not change the state of the original Image object. +// The returned error will be a manifest.ManifestLayerCompressionIncompatibilityError +// if the combination of CompressionOperation and CompressionAlgorithm specified +// in one or more options.LayerInfos items indicates that a layer is compressed using +// an algorithm that is not allowed in OCI. +func (m *manifestOCI1) UpdatedImage(ctx context.Context, options types.ManifestUpdateOptions) (types.Image, error) { + copy := manifestOCI1{ // NOTE: This is not a deep copy, it still shares slices etc. + src: m.src, + configBlob: m.configBlob, + m: manifest.OCI1Clone(m.m), + } + + converted, err := convertManifestIfRequiredWithUpdate(ctx, options, map[string]manifestConvertFn{ + manifest.DockerV2Schema2MediaType: copy.convertToManifestSchema2Generic, + manifest.DockerV2Schema1MediaType: copy.convertToManifestSchema1, + manifest.DockerV2Schema1SignedMediaType: copy.convertToManifestSchema1, + }) + if err != nil { + return nil, err + } + + if converted != nil { + return converted, nil + } + + // No conversion required, update manifest + if options.LayerInfos != nil { + if err := copy.m.UpdateLayerInfos(options.LayerInfos); err != nil { + return nil, err + } + } + // Ignore options.EmbeddedDockerReference: it may be set when converting from schema1, but we really don't care. + + return memoryImageFromManifest(©), nil +} + +func schema2DescriptorFromOCI1Descriptor(d imgspecv1.Descriptor) manifest.Schema2Descriptor { + return manifest.Schema2Descriptor{ + MediaType: d.MediaType, + Size: d.Size, + Digest: d.Digest, + URLs: d.URLs, + } +} + +// convertToManifestSchema2Generic returns a genericManifest implementation converted to manifest.DockerV2Schema2MediaType. +// It may use options.InformationOnly and also adjust *options to be appropriate for editing the returned +// value. +// This does not change the state of the original manifestSchema1 object. 
+// +// We need this function just because a function returning an implementation of the genericManifest +// interface is not automatically assignable to a function type returning the genericManifest interface +func (m *manifestOCI1) convertToManifestSchema2Generic(ctx context.Context, options *types.ManifestUpdateOptions) (genericManifest, error) { + return m.convertToManifestSchema2(ctx, options) +} + +// convertToManifestSchema2 returns a genericManifest implementation converted to manifest.DockerV2Schema2MediaType. +// It may use options.InformationOnly and also adjust *options to be appropriate for editing the returned +// value. +// This does not change the state of the original manifestOCI1 object. +func (m *manifestOCI1) convertToManifestSchema2(_ context.Context, _ *types.ManifestUpdateOptions) (*manifestSchema2, error) { + if m.m.Config.MediaType != imgspecv1.MediaTypeImageConfig { + return nil, internalManifest.NewNonImageArtifactError(m.m.Config.MediaType) + } + + // Create a copy of the descriptor. + config := schema2DescriptorFromOCI1Descriptor(m.m.Config) + + // Above, we have already checked that this manifest refers to an image, not an OCI artifact, + // so the only difference between OCI and DockerSchema2 is the mediatypes. The + // media type of the manifest is handled by manifestSchema2FromComponents. + config.MediaType = manifest.DockerV2Schema2ConfigMediaType + + layers := make([]manifest.Schema2Descriptor, len(m.m.Layers)) + for idx := range layers { + layers[idx] = schema2DescriptorFromOCI1Descriptor(m.m.Layers[idx]) + switch layers[idx].MediaType { + case imgspecv1.MediaTypeImageLayerNonDistributable: + layers[idx].MediaType = manifest.DockerV2Schema2ForeignLayerMediaType + case imgspecv1.MediaTypeImageLayerNonDistributableGzip: + layers[idx].MediaType = manifest.DockerV2Schema2ForeignLayerMediaTypeGzip + case imgspecv1.MediaTypeImageLayerNonDistributableZstd: + return nil, fmt.Errorf("Error during manifest conversion: %q: zstd compression is not supported for docker images", layers[idx].MediaType) + case imgspecv1.MediaTypeImageLayer: + layers[idx].MediaType = manifest.DockerV2SchemaLayerMediaTypeUncompressed + case imgspecv1.MediaTypeImageLayerGzip: + layers[idx].MediaType = manifest.DockerV2Schema2LayerMediaType + case imgspecv1.MediaTypeImageLayerZstd: + return nil, fmt.Errorf("Error during manifest conversion: %q: zstd compression is not supported for docker images", layers[idx].MediaType) + default: + return nil, fmt.Errorf("Unknown media type during manifest conversion: %q", layers[idx].MediaType) + } + } + + // Rather than copying the ConfigBlob now, we just pass m.src to the + // translated manifest, since the only difference is the mediatype of + // descriptors there is no change to any blob stored in m.src. + return manifestSchema2FromComponents(config, m.src, nil, layers), nil +} + +// convertToManifestSchema1 returns a genericManifest implementation converted to manifest.DockerV2Schema1{Signed,}MediaType. +// It may use options.InformationOnly and also adjust *options to be appropriate for editing the returned +// value. +// This does not change the state of the original manifestOCI1 object. 
+func (m *manifestOCI1) convertToManifestSchema1(ctx context.Context, options *types.ManifestUpdateOptions) (genericManifest, error) { + if m.m.Config.MediaType != imgspecv1.MediaTypeImageConfig { + return nil, internalManifest.NewNonImageArtifactError(m.m.Config.MediaType) + } + + // We can't directly convert images to V1, but we can transitively convert via a V2 image + m2, err := m.convertToManifestSchema2(ctx, options) + if err != nil { + return nil, err + } + + return m2.convertToManifestSchema1(ctx, options) +} + +// SupportsEncryption returns if encryption is supported for the manifest type +func (m *manifestOCI1) SupportsEncryption(context.Context) bool { + return true +} + +// CanChangeLayerCompression returns true if we can compress/decompress layers with mimeType in the current image +// (and the code can handle that). +// NOTE: Even if this returns true, the relevant format might not accept all compression algorithms; the set of accepted +// algorithms depends not on the current format, but possibly on the target of a conversion (if UpdatedImage converts +// to a different manifest format). +func (m *manifestOCI1) CanChangeLayerCompression(mimeType string) bool { + return m.m.CanChangeLayerCompression(mimeType) +} diff --git a/vendor/github.com/containers/image/v5/internal/image/oci_index.go b/vendor/github.com/containers/image/v5/internal/image/oci_index.go new file mode 100644 index 000000000..d6e6685b1 --- /dev/null +++ b/vendor/github.com/containers/image/v5/internal/image/oci_index.go @@ -0,0 +1,34 @@ +package image + +import ( + "context" + + "github.com/containers/image/v5/manifest" + "github.com/containers/image/v5/types" + "github.com/pkg/errors" +) + +func manifestOCI1FromImageIndex(ctx context.Context, sys *types.SystemContext, src types.ImageSource, manblob []byte) (genericManifest, error) { + index, err := manifest.OCI1IndexFromManifest(manblob) + if err != nil { + return nil, errors.Wrapf(err, "parsing OCI1 index") + } + targetManifestDigest, err := index.ChooseInstance(sys) + if err != nil { + return nil, errors.Wrapf(err, "choosing image instance") + } + manblob, mt, err := src.GetManifest(ctx, &targetManifestDigest) + if err != nil { + return nil, errors.Wrapf(err, "fetching target platform image selected from image index") + } + + matches, err := manifest.MatchesDigest(manblob, targetManifestDigest) + if err != nil { + return nil, errors.Wrap(err, "computing manifest digest") + } + if !matches { + return nil, errors.Errorf("Image manifest does not match selected manifest digest %s", targetManifestDigest) + } + + return manifestInstanceFromBlob(ctx, sys, src, manblob, mt) +} diff --git a/vendor/github.com/containers/image/v5/internal/image/sourced.go b/vendor/github.com/containers/image/v5/internal/image/sourced.go new file mode 100644 index 000000000..dc09a9e04 --- /dev/null +++ b/vendor/github.com/containers/image/v5/internal/image/sourced.go @@ -0,0 +1,134 @@ +// Package image consolidates knowledge about various container image formats +// (as opposed to image storage mechanisms, which are handled by types.ImageSource) +// and exposes all of them using an unified interface. +package image + +import ( + "context" + + "github.com/containers/image/v5/types" +) + +// FromReference returns a types.ImageCloser implementation for the default instance reading from reference. +// If reference poitns to a manifest list, .Manifest() still returns the manifest list, +// but other methods transparently return data from an appropriate image instance. 
+// +// The caller must call .Close() on the returned ImageCloser. +// +// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource, +// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage instead of calling this function. +func FromReference(ctx context.Context, sys *types.SystemContext, ref types.ImageReference) (types.ImageCloser, error) { + src, err := ref.NewImageSource(ctx, sys) + if err != nil { + return nil, err + } + img, err := FromSource(ctx, sys, src) + if err != nil { + src.Close() + return nil, err + } + return img, nil +} + +// imageCloser implements types.ImageCloser, perhaps allowing simple users +// to use a single object without having keep a reference to a types.ImageSource +// only to call types.ImageSource.Close(). +type imageCloser struct { + types.Image + src types.ImageSource +} + +// FromSource returns a types.ImageCloser implementation for the default instance of source. +// If source is a manifest list, .Manifest() still returns the manifest list, +// but other methods transparently return data from an appropriate image instance. +// +// The caller must call .Close() on the returned ImageCloser. +// +// FromSource “takes ownership” of the input ImageSource and will call src.Close() +// when the image is closed. (This does not prevent callers from using both the +// Image and ImageSource objects simultaneously, but it means that they only need to +// the Image.) +// +// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource, +// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage instead of calling this function. +// +// Most callers can use either FromUnparsedImage or FromReference instead. +// +// This is publicly visible as c/image/image.FromSource. +func FromSource(ctx context.Context, sys *types.SystemContext, src types.ImageSource) (types.ImageCloser, error) { + img, err := FromUnparsedImage(ctx, sys, UnparsedInstance(src, nil)) + if err != nil { + return nil, err + } + return &imageCloser{ + Image: img, + src: src, + }, nil +} + +func (ic *imageCloser) Close() error { + return ic.src.Close() +} + +// SourcedImage is a general set of utilities for working with container images, +// whatever is their underlying transport (i.e. ImageSource-independent). +// Note the existence of docker.Image and image.memoryImage: various instances +// of a types.Image may not be a SourcedImage directly. +// +// Most external users of `types.Image` do not care, and those who care about `docker.Image` know they do. +// +// Internal users may depend on methods available in SourcedImage but not (yet?) in types.Image. +type SourcedImage struct { + *UnparsedImage + ManifestBlob []byte // The manifest of the relevant instance + ManifestMIMEType string // MIME type of ManifestBlob + // genericManifest contains data corresponding to manifestBlob. + // NOTE: The manifest may have been modified in the process; DO NOT reserialize and store genericManifest + // if you want to preserve the original manifest; use manifestBlob directly. + genericManifest +} + +// FromUnparsedImage returns a types.Image implementation for unparsed. +// If unparsed represents a manifest list, .Manifest() still returns the manifest list, +// but other methods transparently return data from an appropriate single image. +// +// The Image must not be used after the underlying ImageSource is Close()d. 
+// +// This is publicly visible as c/image/image.FromUnparsedImage. +func FromUnparsedImage(ctx context.Context, sys *types.SystemContext, unparsed *UnparsedImage) (*SourcedImage, error) { + // Note that the input parameter above is specifically *image.UnparsedImage, not types.UnparsedImage: + // we want to be able to use unparsed.src. We could make that an explicit interface, but, well, + // this is the only UnparsedImage implementation around, anyway. + + // NOTE: It is essential for signature verification that all parsing done in this object happens on the same manifest which is returned by unparsed.Manifest(). + manifestBlob, manifestMIMEType, err := unparsed.Manifest(ctx) + if err != nil { + return nil, err + } + + parsedManifest, err := manifestInstanceFromBlob(ctx, sys, unparsed.src, manifestBlob, manifestMIMEType) + if err != nil { + return nil, err + } + + return &SourcedImage{ + UnparsedImage: unparsed, + ManifestBlob: manifestBlob, + ManifestMIMEType: manifestMIMEType, + genericManifest: parsedManifest, + }, nil +} + +// Size returns the size of the image as stored, if it's known, or -1 if it isn't. +func (i *SourcedImage) Size() (int64, error) { + return -1, nil +} + +// Manifest overrides the UnparsedImage.Manifest to always use the fields which we have already fetched. +func (i *SourcedImage) Manifest(ctx context.Context) ([]byte, string, error) { + return i.ManifestBlob, i.ManifestMIMEType, nil +} + +func (i *SourcedImage) LayerInfosForCopy(ctx context.Context) ([]types.BlobInfo, error) { + return i.UnparsedImage.src.LayerInfosForCopy(ctx, i.UnparsedImage.instanceDigest) +} diff --git a/vendor/github.com/containers/image/v5/internal/image/unparsed.go b/vendor/github.com/containers/image/v5/internal/image/unparsed.go new file mode 100644 index 000000000..8ea0f61b4 --- /dev/null +++ b/vendor/github.com/containers/image/v5/internal/image/unparsed.go @@ -0,0 +1,99 @@ +package image + +import ( + "context" + + "github.com/containers/image/v5/docker/reference" + "github.com/containers/image/v5/manifest" + "github.com/containers/image/v5/types" + "github.com/opencontainers/go-digest" + "github.com/pkg/errors" +) + +// UnparsedImage implements types.UnparsedImage . +// An UnparsedImage is a pair of (ImageSource, instance digest); it can represent either a manifest list or a single image instance. +// +// This is publicly visible as c/image/image.UnparsedImage. +type UnparsedImage struct { + src types.ImageSource + instanceDigest *digest.Digest + cachedManifest []byte // A private cache for Manifest(); nil if not yet known. + // A private cache for Manifest(), may be the empty string if guessing failed. + // Valid iff cachedManifest is not nil. + cachedManifestMIMEType string + cachedSignatures [][]byte // A private cache for Signatures(); nil if not yet known. +} + +// UnparsedInstance returns a types.UnparsedImage implementation for (source, instanceDigest). +// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve (when the primary manifest is a manifest list). +// +// The UnparsedImage must not be used after the underlying ImageSource is Close()d. +// +// This is publicly visible as c/image/image.UnparsedInstance. 
+func UnparsedInstance(src types.ImageSource, instanceDigest *digest.Digest) *UnparsedImage { + return &UnparsedImage{ + src: src, + instanceDigest: instanceDigest, + } +} + +// Reference returns the reference used to set up this source, _as specified by the user_ +// (not as the image itself, or its underlying storage, claims). This can be used e.g. to determine which public keys are trusted for this image. +func (i *UnparsedImage) Reference() types.ImageReference { + // Note that this does not depend on instanceDigest; e.g. all instances within a manifest list need to be signed with the manifest list identity. + return i.src.Reference() +} + +// Manifest is like ImageSource.GetManifest, but the result is cached; it is OK to call this however often you need. +func (i *UnparsedImage) Manifest(ctx context.Context) ([]byte, string, error) { + if i.cachedManifest == nil { + m, mt, err := i.src.GetManifest(ctx, i.instanceDigest) + if err != nil { + return nil, "", err + } + + // ImageSource.GetManifest does not do digest verification, but we do; + // this immediately protects also any user of types.Image. + if digest, haveDigest := i.expectedManifestDigest(); haveDigest { + matches, err := manifest.MatchesDigest(m, digest) + if err != nil { + return nil, "", errors.Wrap(err, "computing manifest digest") + } + if !matches { + return nil, "", errors.Errorf("Manifest does not match provided manifest digest %s", digest) + } + } + + i.cachedManifest = m + i.cachedManifestMIMEType = mt + } + return i.cachedManifest, i.cachedManifestMIMEType, nil +} + +// expectedManifestDigest returns a the expected value of the manifest digest, and an indicator whether it is known. +// The bool return value seems redundant with digest != ""; it is used explicitly +// to refuse (unexpected) situations when the digest exists but is "". +func (i *UnparsedImage) expectedManifestDigest() (digest.Digest, bool) { + if i.instanceDigest != nil { + return *i.instanceDigest, true + } + ref := i.Reference().DockerReference() + if ref != nil { + if canonical, ok := ref.(reference.Canonical); ok { + return canonical.Digest(), true + } + } + return "", false +} + +// Signatures is like ImageSource.GetSignatures, but the result is cached; it is OK to call this however often you need. +func (i *UnparsedImage) Signatures(ctx context.Context) ([][]byte, error) { + if i.cachedSignatures == nil { + sigs, err := i.src.GetSignatures(ctx, i.instanceDigest) + if err != nil { + return nil, err + } + i.cachedSignatures = sigs + } + return i.cachedSignatures, nil +} diff --git a/vendor/github.com/containers/image/v5/internal/manifest/errors.go b/vendor/github.com/containers/image/v5/internal/manifest/errors.go new file mode 100644 index 000000000..e5732a8c4 --- /dev/null +++ b/vendor/github.com/containers/image/v5/internal/manifest/errors.go @@ -0,0 +1,32 @@ +package manifest + +import "fmt" + +// NonImageArtifactError (detected via errors.As) is used when asking for an image-specific operation +// on an object which is not a “container image” in the standard sense (e.g. an OCI artifact) +// +// This is publicly visible as c/image/manifest.NonImageArtifactError (but we don’t provide a public constructor) +type NonImageArtifactError struct { + // Callers should not be blindly calling image-specific operations and only checking MIME types + // on failure; if they care about the artifact type, they should check before using it. 
+ // If they blindly assume an image, they don’t really need this value; just a type check + // is sufficient for basic "we can only pull images" UI. + // + // Also, there are fairly widespread “artifacts” which nevertheless use imgspecv1.MediaTypeImageConfig, + // e.g. https://github.com/sigstore/cosign/blob/main/specs/SIGNATURE_SPEC.md , which could cause the callers + // to complain about a non-image artifact with the correct MIME type; we should probably add some other kind of + // type discrimination, _and_ somehow make it available in the API, if we expect API callers to make decisions + // based on that kind of data. + // + // So, let’s not expose this until a specific need is identified. + mimeType string +} + +// NewNonImageArtifactError returns a NonImageArtifactError about an artifact with mimeType. +func NewNonImageArtifactError(mimeType string) error { + return NonImageArtifactError{mimeType: mimeType} +} + +func (e NonImageArtifactError) Error() string { + return fmt.Sprintf("unsupported image-specific operation on artifact with type %q", e.mimeType) +} diff --git a/vendor/github.com/containers/image/v5/manifest/common.go b/vendor/github.com/containers/image/v5/manifest/common.go index 20955ab7f..9cf7dd3a9 100644 --- a/vendor/github.com/containers/image/v5/manifest/common.go +++ b/vendor/github.com/containers/image/v5/manifest/common.go @@ -118,6 +118,18 @@ type compressionMIMETypeSet map[string]string const mtsUncompressed = "" // A key in compressionMIMETypeSet for the uncompressed variant const mtsUnsupportedMIMEType = "" // A value in compressionMIMETypeSet that means “recognized but unsupported” +// findCompressionMIMETypeSet returns a pointer to a compressionMIMETypeSet in variantTable that contains a value of mimeType, or nil if not found +func findCompressionMIMETypeSet(variantTable []compressionMIMETypeSet, mimeType string) compressionMIMETypeSet { + for _, variants := range variantTable { + for _, mt := range variants { + if mt == mimeType { + return variants + } + } + } + return nil +} + // compressionVariantMIMEType returns a variant of mimeType for the specified algorithm (which may be nil // to mean "no compression"), based on variantTable. 
// The returned error will be a ManifestLayerCompressionIncompatibilityError if mimeType has variants @@ -130,29 +142,26 @@ func compressionVariantMIMEType(variantTable []compressionMIMETypeSet, mimeType if mimeType == mtsUnsupportedMIMEType { // Prevent matching against the {algo:mtsUnsupportedMIMEType} entries return "", fmt.Errorf("cannot update unknown MIME type") } - for _, variants := range variantTable { - for _, mt := range variants { - if mt == mimeType { // Found the variant - name := mtsUncompressed - if algorithm != nil { - name = algorithm.InternalUnstableUndocumentedMIMEQuestionMark() - } - if res, ok := variants[name]; ok { - if res != mtsUnsupportedMIMEType { - return res, nil - } - if name != mtsUncompressed { - return "", ManifestLayerCompressionIncompatibilityError{fmt.Sprintf("%s compression is not supported for type %q", name, mt)} - } - return "", ManifestLayerCompressionIncompatibilityError{fmt.Sprintf("uncompressed variant is not supported for type %q", mt)} - } - if name != mtsUncompressed { - return "", ManifestLayerCompressionIncompatibilityError{fmt.Sprintf("unknown compressed with algorithm %s variant for type %s", name, mt)} - } - // We can't very well say “the idea of no compression is unknown” - return "", ManifestLayerCompressionIncompatibilityError{fmt.Sprintf("uncompressed variant is not supported for type %q", mt)} + variants := findCompressionMIMETypeSet(variantTable, mimeType) + if variants != nil { + name := mtsUncompressed + if algorithm != nil { + name = algorithm.InternalUnstableUndocumentedMIMEQuestionMark() + } + if res, ok := variants[name]; ok { + if res != mtsUnsupportedMIMEType { + return res, nil } + if name != mtsUncompressed { + return "", ManifestLayerCompressionIncompatibilityError{fmt.Sprintf("%s compression is not supported for type %q", name, mimeType)} + } + return "", ManifestLayerCompressionIncompatibilityError{fmt.Sprintf("uncompressed variant is not supported for type %q", mimeType)} + } + if name != mtsUncompressed { + return "", ManifestLayerCompressionIncompatibilityError{fmt.Sprintf("unknown compressed with algorithm %s variant for type %s", name, mimeType)} } + // We can't very well say “the idea of no compression is unknown” + return "", ManifestLayerCompressionIncompatibilityError{fmt.Sprintf("uncompressed variant is not supported for type %q", mimeType)} } if algorithm != nil { return "", fmt.Errorf("unsupported MIME type for compression: %s", mimeType) @@ -209,3 +218,13 @@ type ManifestLayerCompressionIncompatibilityError struct { func (m ManifestLayerCompressionIncompatibilityError) Error() string { return m.text } + +// compressionVariantsRecognizeMIMEType returns true if variantTable contains data about compressing/decompressing layers with mimeType +// Note that the caller still needs to worry about a specific algorithm not being supported. +func compressionVariantsRecognizeMIMEType(variantTable []compressionMIMETypeSet, mimeType string) bool { + if mimeType == mtsUnsupportedMIMEType { // Prevent matching against the {algo:mtsUnsupportedMIMEType} entries + return false + } + variants := findCompressionMIMETypeSet(variantTable, mimeType) + return variants != nil // Alternatively, this could be len(variants) > 1, but really the caller should ask about a specific algorithm. 
+} diff --git a/vendor/github.com/containers/image/v5/manifest/docker_schema2.go b/vendor/github.com/containers/image/v5/manifest/docker_schema2.go index 1f4db54ee..8b3fbdd39 100644 --- a/vendor/github.com/containers/image/v5/manifest/docker_schema2.go +++ b/vendor/github.com/containers/image/v5/manifest/docker_schema2.go @@ -295,3 +295,11 @@ func (m *Schema2) ImageID([]digest.Digest) (string, error) { } return m.ConfigDescriptor.Digest.Hex(), nil } + +// CanChangeLayerCompression returns true if we can compress/decompress layers with mimeType in the current image +// (and the code can handle that). +// NOTE: Even if this returns true, the relevant format might not accept all compression algorithms; the set of accepted +// algorithms depends not on the current format, but possibly on the target of a conversion. +func (m *Schema2) CanChangeLayerCompression(mimeType string) bool { + return compressionVariantsRecognizeMIMEType(schema2CompressionMIMETypeSets, mimeType) +} diff --git a/vendor/github.com/containers/image/v5/manifest/manifest.go b/vendor/github.com/containers/image/v5/manifest/manifest.go index 2e3e5da15..53fc866a7 100644 --- a/vendor/github.com/containers/image/v5/manifest/manifest.go +++ b/vendor/github.com/containers/image/v5/manifest/manifest.go @@ -4,6 +4,7 @@ import ( "encoding/json" "fmt" + internalManifest "github.com/containers/image/v5/internal/manifest" "github.com/containers/image/v5/types" "github.com/containers/libtrust" digest "github.com/opencontainers/go-digest" @@ -34,6 +35,10 @@ const ( DockerV2Schema2ForeignLayerMediaTypeGzip = "application/vnd.docker.image.rootfs.foreign.diff.tar.gzip" ) +// NonImageArtifactError (detected via errors.As) is used when asking for an image-specific operation +// on an object which is not a “container image” in the standard sense (e.g. an OCI artifact) +type NonImageArtifactError = internalManifest.NonImageArtifactError + // SupportedSchema2MediaType checks if the specified string is a supported Docker v2s2 media type. func SupportedSchema2MediaType(m string) error { switch m { diff --git a/vendor/github.com/containers/image/v5/manifest/oci.go b/vendor/github.com/containers/image/v5/manifest/oci.go index 5892184df..11927ab5e 100644 --- a/vendor/github.com/containers/image/v5/manifest/oci.go +++ b/vendor/github.com/containers/image/v5/manifest/oci.go @@ -5,6 +5,7 @@ import ( "fmt" "strings" + internalManifest "github.com/containers/image/v5/internal/manifest" compressiontypes "github.com/containers/image/v5/pkg/compression/types" "github.com/containers/image/v5/types" ociencspec "github.com/containers/ocicrypt/spec" @@ -115,6 +116,12 @@ var oci1CompressionMIMETypeSets = []compressionMIMETypeSet{ // UpdateLayerInfos replaces the original layers with the specified BlobInfos (size+digest+urls+mediatype), in order (the root layer first, and then successive layered layers) // The returned error will be a manifest.ManifestLayerCompressionIncompatibilityError if any of the layerInfos includes a combination of CompressionOperation and // CompressionAlgorithm that isn't supported by OCI. +// +// It’s generally the caller’s responsibility to determine whether a particular edit is acceptable, rather than relying on +// failures of this function, because the layer is typically created _before_ UpdateLayerInfos is called, because UpdateLayerInfos needs +// to know the final digest). 
See OCI1.CanChangeLayerCompression for some help in determining this; other aspects like compression +// algorithms that might not be supported by a format, or the limited set of MIME types accepted for encryption, are not currently +// handled — that logic should eventually also be provided as OCI1 methods, not hard-coded in callers. func (m *OCI1) UpdateLayerInfos(layerInfos []types.BlobInfo) error { if len(m.Layers) != len(layerInfos) { return errors.Errorf("Error preparing updated manifest: layer count changed from %d to %d", len(m.Layers), len(layerInfos)) @@ -151,6 +158,33 @@ func (m *OCI1) UpdateLayerInfos(layerInfos []types.BlobInfo) error { return nil } +// getEncryptedMediaType will return the mediatype to its encrypted counterpart and return +// an error if the mediatype does not support encryption +func getEncryptedMediaType(mediatype string) (string, error) { + for _, s := range strings.Split(mediatype, "+")[1:] { + if s == "encrypted" { + return "", errors.Errorf("unsupportedmediatype: %v already encrypted", mediatype) + } + } + unsuffixedMediatype := strings.Split(mediatype, "+")[0] + switch unsuffixedMediatype { + case DockerV2Schema2LayerMediaType, imgspecv1.MediaTypeImageLayer, imgspecv1.MediaTypeImageLayerNonDistributable: + return mediatype + "+encrypted", nil + } + + return "", errors.Errorf("unsupported mediatype to encrypt: %v", mediatype) +} + +// getEncryptedMediaType will return the mediatype to its encrypted counterpart and return +// an error if the mediatype does not support decryption +func getDecryptedMediaType(mediatype string) (string, error) { + if !strings.HasSuffix(mediatype, "+encrypted") { + return "", errors.Errorf("unsupported mediatype to decrypt %v:", mediatype) + } + + return strings.TrimSuffix(mediatype, "+encrypted"), nil +} + // Serialize returns the manifest in a blob format. // NOTE: Serialize() does not in general reproduce the original blob if this object was loaded from one, even if no modifications were made! func (m *OCI1) Serialize() ([]byte, error) { @@ -159,6 +193,14 @@ func (m *OCI1) Serialize() ([]byte, error) { // Inspect returns various information for (skopeo inspect) parsed from the manifest and configuration. func (m *OCI1) Inspect(configGetter func(types.BlobInfo) ([]byte, error)) (*types.ImageInspectInfo, error) { + if m.Config.MediaType != imgspecv1.MediaTypeImageConfig { + // We could return at least the layers, but that’s already available in a better format via types.Image.LayerInfos. + // Most software calling this without human intervention is going to expect the values to be realistic and relevant, + // and is probably better served by failing; we can always re-visit that later if we fail now, but + // if we started returning some data for OCI artifacts now, we couldn’t start failing in this function later. + return nil, internalManifest.NewNonImageArtifactError(m.Config.MediaType) + } + config, err := configGetter(m.ConfigInfo()) if err != nil { return nil, err @@ -186,35 +228,39 @@ func (m *OCI1) Inspect(configGetter func(types.BlobInfo) ([]byte, error)) (*type // ImageID computes an ID which can uniquely identify this image by its contents. func (m *OCI1) ImageID([]digest.Digest) (string, error) { + // The way m.Config.Digest “uniquely identifies” an image is + // by containing RootFS.DiffIDs, which identify the layers of the image. 
+	// For non-image artifacts, we can’t expect the config to change
+	// any time the other layers (semantically) change, so this approach of
+	// distinguishing objects only by m.Config.Digest doesn’t work in general.
+	//
+	// Any caller of this method presumably wants to disambiguate the same
+	// images with a different representation, but doesn’t want to disambiguate
+	// representations (by using a manifest digest). So, submitting a non-image
+	// artifact to such a caller indicates an expectation mismatch.
+	// So, we just fail here instead of inventing some other ID value (e.g.
+	// by combining the config and blob layer digests). That still
+	// gives us the option to not fail, and return some value, in the future,
+	// without committing to that approach now.
+	// (The only known caller of ImageID is storage/storageImageDestination.computeID,
+	// which can’t work with non-image artifacts.)
+	if m.Config.MediaType != imgspecv1.MediaTypeImageConfig {
+		return "", internalManifest.NewNonImageArtifactError(m.Config.MediaType)
+	}
+
 	if err := m.Config.Digest.Validate(); err != nil {
 		return "", err
 	}
 	return m.Config.Digest.Hex(), nil
 }
 
-// getEncryptedMediaType will return the mediatype to its encrypted counterpart and return
-// an error if the mediatype does not support encryption
-func getEncryptedMediaType(mediatype string) (string, error) {
-	for _, s := range strings.Split(mediatype, "+")[1:] {
-		if s == "encrypted" {
-			return "", errors.Errorf("unsupportedmediatype: %v already encrypted", mediatype)
-		}
-	}
-	unsuffixedMediatype := strings.Split(mediatype, "+")[0]
-	switch unsuffixedMediatype {
-	case DockerV2Schema2LayerMediaType, imgspecv1.MediaTypeImageLayer, imgspecv1.MediaTypeImageLayerNonDistributable:
-		return mediatype + "+encrypted", nil
-	}
-
-	return "", errors.Errorf("unsupported mediatype to encrypt: %v", mediatype)
-}
-
-// getEncryptedMediaType will return the mediatype to its encrypted counterpart and return
-// an error if the mediatype does not support decryption
-func getDecryptedMediaType(mediatype string) (string, error) {
-	if !strings.HasSuffix(mediatype, "+encrypted") {
-		return "", errors.Errorf("unsupported mediatype to decrypt %v:", mediatype)
+// CanChangeLayerCompression returns true if we can compress/decompress layers with mimeType in the current image
+// (and the code can handle that).
+// NOTE: Even if this returns true, the relevant format might not accept all compression algorithms; the set of accepted
+// algorithms depends not on the current format, but possibly on the target of a conversion.
+func (m *OCI1) CanChangeLayerCompression(mimeType string) bool { + if m.Config.MediaType != imgspecv1.MediaTypeImageConfig { + return false } - - return strings.TrimSuffix(mediatype, "+encrypted"), nil + return compressionVariantsRecognizeMIMEType(oci1CompressionMIMETypeSets, mimeType) } diff --git a/vendor/github.com/containers/image/v5/oci/archive/oci_transport.go b/vendor/github.com/containers/image/v5/oci/archive/oci_transport.go index 4fa912765..74fefbd4f 100644 --- a/vendor/github.com/containers/image/v5/oci/archive/oci_transport.go +++ b/vendor/github.com/containers/image/v5/oci/archive/oci_transport.go @@ -8,7 +8,7 @@ import ( "github.com/containers/image/v5/directory/explicitfilepath" "github.com/containers/image/v5/docker/reference" - "github.com/containers/image/v5/image" + "github.com/containers/image/v5/internal/image" "github.com/containers/image/v5/internal/tmpdir" "github.com/containers/image/v5/oci/internal" ocilayout "github.com/containers/image/v5/oci/layout" @@ -122,11 +122,7 @@ func (ref ociArchiveReference) PolicyConfigurationNamespaces() []string { // verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage. // WARNING: This may not do the right thing for a manifest list, see image.FromSource for details. func (ref ociArchiveReference) NewImage(ctx context.Context, sys *types.SystemContext) (types.ImageCloser, error) { - src, err := newImageSource(ctx, sys, ref) - if err != nil { - return nil, err - } - return image.FromSource(ctx, sys, src) + return image.FromReference(ctx, sys, ref) } // NewImageSource returns a types.ImageSource for this reference. diff --git a/vendor/github.com/containers/image/v5/oci/layout/oci_transport.go b/vendor/github.com/containers/image/v5/oci/layout/oci_transport.go index a99b63158..a9029a609 100644 --- a/vendor/github.com/containers/image/v5/oci/layout/oci_transport.go +++ b/vendor/github.com/containers/image/v5/oci/layout/oci_transport.go @@ -10,7 +10,7 @@ import ( "github.com/containers/image/v5/directory/explicitfilepath" "github.com/containers/image/v5/docker/reference" - "github.com/containers/image/v5/image" + "github.com/containers/image/v5/internal/image" "github.com/containers/image/v5/oci/internal" "github.com/containers/image/v5/transports" "github.com/containers/image/v5/types" @@ -154,11 +154,7 @@ func (ref ociReference) PolicyConfigurationNamespaces() []string { // verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage. // WARNING: This may not do the right thing for a manifest list, see image.FromSource for details. func (ref ociReference) NewImage(ctx context.Context, sys *types.SystemContext) (types.ImageCloser, error) { - src, err := newImageSource(sys, ref) - if err != nil { - return nil, err - } - return image.FromSource(ctx, sys, src) + return image.FromReference(ctx, sys, ref) } // getIndex returns a pointer to the index references by this ociReference. 
If an error occurs opening an index nil is returned together diff --git a/vendor/github.com/containers/image/v5/openshift/openshift_transport.go b/vendor/github.com/containers/image/v5/openshift/openshift_transport.go index 6bbb43be2..c8d65c78a 100644 --- a/vendor/github.com/containers/image/v5/openshift/openshift_transport.go +++ b/vendor/github.com/containers/image/v5/openshift/openshift_transport.go @@ -8,7 +8,7 @@ import ( "github.com/containers/image/v5/docker/policyconfiguration" "github.com/containers/image/v5/docker/reference" - genericImage "github.com/containers/image/v5/image" + genericImage "github.com/containers/image/v5/internal/image" "github.com/containers/image/v5/transports" "github.com/containers/image/v5/types" "github.com/pkg/errors" @@ -132,11 +132,7 @@ func (ref openshiftReference) PolicyConfigurationNamespaces() []string { // verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage. // WARNING: This may not do the right thing for a manifest list, see image.FromSource for details. func (ref openshiftReference) NewImage(ctx context.Context, sys *types.SystemContext) (types.ImageCloser, error) { - src, err := newImageSource(sys, ref) - if err != nil { - return nil, err - } - return genericImage.FromSource(ctx, sys, src) + return genericImage.FromReference(ctx, sys, ref) } // NewImageSource returns a types.ImageSource for this reference. diff --git a/vendor/github.com/containers/image/v5/ostree/ostree_transport.go b/vendor/github.com/containers/image/v5/ostree/ostree_transport.go index 1e35ab605..6c4262368 100644 --- a/vendor/github.com/containers/image/v5/ostree/ostree_transport.go +++ b/vendor/github.com/containers/image/v5/ostree/ostree_transport.go @@ -14,7 +14,7 @@ import ( "github.com/containers/image/v5/directory/explicitfilepath" "github.com/containers/image/v5/docker/reference" - "github.com/containers/image/v5/image" + "github.com/containers/image/v5/internal/image" "github.com/containers/image/v5/transports" "github.com/containers/image/v5/types" "github.com/pkg/errors" @@ -184,17 +184,7 @@ func (s *ostreeImageCloser) Size() (int64, error) { // NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource, // verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage. func (ref ostreeReference) NewImage(ctx context.Context, sys *types.SystemContext) (types.ImageCloser, error) { - var tmpDir string - if sys == nil || sys.OSTreeTmpDirPath == "" { - tmpDir = os.TempDir() - } else { - tmpDir = sys.OSTreeTmpDirPath - } - src, err := newImageSource(tmpDir, ref) - if err != nil { - return nil, err - } - return image.FromSource(ctx, sys, src) + return image.FromReference(ctx, sys, ref) } // NewImageSource returns a types.ImageSource for this reference. 
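The transport changes above all share one shape: each NewImage used to open its own ImageSource and wrap it with image.FromSource, closing the source by hand on failure; after this patch they delegate to the new internal/image.FromReference helper (whose implementation appears earlier in this patch), which performs the same open, wrap, and cleanup sequence in one place. A minimal sketch of the consolidated call, assuming a caller inside c/image itself (the internal package cannot be imported from outside the module); newImageForRef is an illustrative name, not part of the patch:

package example

import (
	"context"

	"github.com/containers/image/v5/internal/image"
	"github.com/containers/image/v5/types"
)

// newImageForRef shows what every transport's NewImage now reduces to:
// FromReference opens the ImageSource for ref, wraps it in an ImageCloser,
// and closes the source itself if the wrapping step fails.
func newImageForRef(ctx context.Context, sys *types.SystemContext, ref types.ImageReference) (types.ImageCloser, error) {
	return image.FromReference(ctx, sys, ref)
}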
diff --git a/vendor/github.com/containers/image/v5/pkg/blobcache/blobcache.go b/vendor/github.com/containers/image/v5/pkg/blobcache/blobcache.go index 8b22733ac..c9971cbdc 100644 --- a/vendor/github.com/containers/image/v5/pkg/blobcache/blobcache.go +++ b/vendor/github.com/containers/image/v5/pkg/blobcache/blobcache.go @@ -9,7 +9,7 @@ import ( "sync" "github.com/containers/image/v5/docker/reference" - "github.com/containers/image/v5/image" + "github.com/containers/image/v5/internal/image" "github.com/containers/image/v5/manifest" "github.com/containers/image/v5/pkg/compression" "github.com/containers/image/v5/transports" @@ -158,11 +158,7 @@ func (b *BlobCache) ClearCache() error { } func (b *BlobCache) NewImage(ctx context.Context, sys *types.SystemContext) (types.ImageCloser, error) { - src, err := b.NewImageSource(ctx, sys) - if err != nil { - return nil, errors.Wrapf(err, "error creating new image %q", transports.ImageName(b.reference)) - } - return image.FromSource(ctx, sys, src) + return image.FromReference(ctx, sys, b) } func (b *BlobCache) NewImageSource(ctx context.Context, sys *types.SystemContext) (types.ImageSource, error) { diff --git a/vendor/github.com/containers/image/v5/sif/transport.go b/vendor/github.com/containers/image/v5/sif/transport.go index 18d894bc3..2037f2508 100644 --- a/vendor/github.com/containers/image/v5/sif/transport.go +++ b/vendor/github.com/containers/image/v5/sif/transport.go @@ -9,7 +9,7 @@ import ( "github.com/containers/image/v5/directory/explicitfilepath" "github.com/containers/image/v5/docker/reference" - "github.com/containers/image/v5/image" + "github.com/containers/image/v5/internal/image" "github.com/containers/image/v5/transports" "github.com/containers/image/v5/types" ) @@ -139,11 +139,7 @@ func (ref sifReference) PolicyConfigurationNamespaces() []string { // verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage. // WARNING: This may not do the right thing for a manifest list, see image.FromSource for details. func (ref sifReference) NewImage(ctx context.Context, sys *types.SystemContext) (types.ImageCloser, error) { - src, err := newImageSource(ctx, sys, ref) - if err != nil { - return nil, err - } - return image.FromSource(ctx, sys, src) + return image.FromReference(ctx, sys, ref) } // NewImageSource returns a types.ImageSource for this reference. diff --git a/vendor/github.com/containers/image/v5/storage/storage_image.go b/vendor/github.com/containers/image/v5/storage/storage_image.go index 8071e3b32..06f90d363 100644 --- a/vendor/github.com/containers/image/v5/storage/storage_image.go +++ b/vendor/github.com/containers/image/v5/storage/storage_image.go @@ -16,7 +16,7 @@ import ( "sync/atomic" "github.com/containers/image/v5/docker/reference" - "github.com/containers/image/v5/image" + "github.com/containers/image/v5/internal/image" "github.com/containers/image/v5/internal/private" "github.com/containers/image/v5/internal/putblobdigest" "github.com/containers/image/v5/internal/tmpdir" @@ -486,7 +486,7 @@ func (s *storageImageDestination) PutBlob(ctx context.Context, stream io.Reader, } // putBlobToPendingFile implements ImageDestination.PutBlobWithOptions, storing stream into an on-disk file. -// The caller must arrange the blob to be eventually commited using s.commitLayer(). +// The caller must arrange the blob to be eventually committed using s.commitLayer(). 
func (s *storageImageDestination) putBlobToPendingFile(ctx context.Context, stream io.Reader, blobinfo types.BlobInfo, options *private.PutBlobOptions) (types.BlobInfo, error) { // Stores a layer or data blob in our temporary directory, checking that any information // in the blobinfo matches the incoming data. @@ -641,7 +641,7 @@ func (s *storageImageDestination) TryReusingBlob(ctx context.Context, blobinfo t } // tryReusingBlobAsPending implements TryReusingBlobWithOptions, filling s.blobDiffIDs and other metadata. -// The caller must arrange the blob to be eventually commited using s.commitLayer(). +// The caller must arrange the blob to be eventually committed using s.commitLayer(). func (s *storageImageDestination) tryReusingBlobAsPending(ctx context.Context, blobinfo types.BlobInfo, options *private.TryReusingBlobOptions) (bool, types.BlobInfo, error) { // lock the entire method as it executes fairly quickly s.lock.Lock() @@ -941,7 +941,7 @@ func (s *storageImageDestination) commitLayer(ctx context.Context, blob manifest s.lock.Unlock() if ok { layer, err := al.PutAs(id, lastLayer, nil) - if err != nil { + if err != nil && errors.Cause(err) != storage.ErrDuplicateID { return errors.Wrapf(err, "failed to put layer from digest and labels") } lastLayer = layer.ID diff --git a/vendor/github.com/containers/image/v5/tarball/tarball_reference.go b/vendor/github.com/containers/image/v5/tarball/tarball_reference.go index 23f67c49e..690067ec3 100644 --- a/vendor/github.com/containers/image/v5/tarball/tarball_reference.go +++ b/vendor/github.com/containers/image/v5/tarball/tarball_reference.go @@ -7,7 +7,7 @@ import ( "strings" "github.com/containers/image/v5/docker/reference" - "github.com/containers/image/v5/image" + "github.com/containers/image/v5/internal/image" "github.com/containers/image/v5/types" imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" @@ -67,16 +67,7 @@ func (r *tarballReference) PolicyConfigurationNamespaces() []string { // verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage. // WARNING: This may not do the right thing for a manifest list, see image.FromSource for details. 
func (r *tarballReference) NewImage(ctx context.Context, sys *types.SystemContext) (types.ImageCloser, error) { - src, err := r.NewImageSource(ctx, sys) - if err != nil { - return nil, err - } - img, err := image.FromSource(ctx, sys, src) - if err != nil { - src.Close() - return nil, err - } - return img, nil + return image.FromReference(ctx, sys, r) } func (r *tarballReference) DeleteImage(ctx context.Context, sys *types.SystemContext) error { diff --git a/vendor/github.com/containers/ocicrypt/.travis.yml b/vendor/github.com/containers/ocicrypt/.travis.yml index e4dd4a402..1036c8d3f 100644 --- a/vendor/github.com/containers/ocicrypt/.travis.yml +++ b/vendor/github.com/containers/ocicrypt/.travis.yml @@ -21,7 +21,7 @@ addons: go_import_path: github.com/containers/ocicrypt install: - - curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sh -s -- -b $(go env GOPATH)/bin v1.30.0 + - curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(go env GOPATH)/bin v1.46.2 script: - make diff --git a/vendor/github.com/containers/ocicrypt/config/constructors.go b/vendor/github.com/containers/ocicrypt/config/constructors.go index a789d052d..c537a20a0 100644 --- a/vendor/github.com/containers/ocicrypt/config/constructors.go +++ b/vendor/github.com/containers/ocicrypt/config/constructors.go @@ -21,7 +21,7 @@ import ( "strings" "github.com/pkg/errors" - "gopkg.in/yaml.v2" + "gopkg.in/yaml.v3" ) // EncryptWithJwe returns a CryptoConfig to encrypt with jwe public keys diff --git a/vendor/github.com/containers/ocicrypt/config/pkcs11config/config.go b/vendor/github.com/containers/ocicrypt/config/pkcs11config/config.go index 76be34138..5a8bc022c 100644 --- a/vendor/github.com/containers/ocicrypt/config/pkcs11config/config.go +++ b/vendor/github.com/containers/ocicrypt/config/pkcs11config/config.go @@ -24,7 +24,7 @@ import ( "github.com/containers/ocicrypt/crypto/pkcs11" "github.com/pkg/errors" - "gopkg.in/yaml.v2" + "gopkg.in/yaml.v3" ) // OcicryptConfig represents the format of an imgcrypt.conf config file diff --git a/vendor/github.com/containers/ocicrypt/crypto/pkcs11/common.go b/vendor/github.com/containers/ocicrypt/crypto/pkcs11/common.go index 7fcd2e3af..c6d47e830 100644 --- a/vendor/github.com/containers/ocicrypt/crypto/pkcs11/common.go +++ b/vendor/github.com/containers/ocicrypt/crypto/pkcs11/common.go @@ -17,7 +17,7 @@ import ( "fmt" "github.com/pkg/errors" pkcs11uri "github.com/stefanberger/go-pkcs11uri" - "gopkg.in/yaml.v2" + "gopkg.in/yaml.v3" ) // Pkcs11KeyFile describes the format of the pkcs11 (private) key file. 
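The storage_image.go hunk above also relaxes commitLayer's handling of the additional-layer store: a PutAs failure is now fatal only when it is not storage.ErrDuplicateID, since a duplicate ID simply means the layer was already committed. A small illustrative sketch of that pattern, assuming a hypothetical putAdditionalLayer wrapper (the real call site is storageImageDestination.commitLayer):

package example

import (
	"fmt"

	"github.com/containers/storage"
	"github.com/pkg/errors"
)

// putAdditionalLayer stands in for the al.PutAs call; here it reports a duplicate ID.
func putAdditionalLayer() error {
	return errors.Wrap(storage.ErrDuplicateID, "failed to put layer from digest and labels")
}

func run() error {
	err := putAdditionalLayer()
	// Only genuine failures abort the copy; ErrDuplicateID means the layer already
	// exists in the store, so the commit can proceed and reuse it.
	if err != nil && errors.Cause(err) != storage.ErrDuplicateID {
		return err
	}
	fmt.Println("layer available (newly stored or already present)")
	return nil
}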
diff --git a/vendor/github.com/containers/ocicrypt/go.mod b/vendor/github.com/containers/ocicrypt/go.mod index 8837d288e..46ee2a289 100644 --- a/vendor/github.com/containers/ocicrypt/go.mod +++ b/vendor/github.com/containers/ocicrypt/go.mod @@ -17,5 +17,5 @@ require ( golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1 google.golang.org/grpc v1.33.2 gopkg.in/square/go-jose.v2 v2.5.1 - gopkg.in/yaml.v2 v2.4.0 + gopkg.in/yaml.v3 v3.0.0 ) diff --git a/vendor/github.com/containers/ocicrypt/go.sum b/vendor/github.com/containers/ocicrypt/go.sum index a621a145c..86e36e768 100644 --- a/vendor/github.com/containers/ocicrypt/go.sum +++ b/vendor/github.com/containers/ocicrypt/go.sum @@ -111,7 +111,7 @@ gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+ gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/square/go-jose.v2 v2.5.1 h1:7odma5RETjNHWJnR32wx8t+Io4djHE1PqxCFx3iiZ2w= gopkg.in/square/go-jose.v2 v2.5.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= -gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= -gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.0 h1:hjy8E9ON/egN1tAYqKb61G10WtihqetD4sz2H+8nIeA= +gopkg.in/yaml.v3 v3.0.0/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/vendor/github.com/imdario/mergo/README.md b/vendor/github.com/imdario/mergo/README.md index aa8cbd7ce..7e6f7aeee 100644 --- a/vendor/github.com/imdario/mergo/README.md +++ b/vendor/github.com/imdario/mergo/README.md @@ -8,8 +8,7 @@ [![Coverage Status][9]][10] [![Sourcegraph][11]][12] [![FOSSA Status][13]][14] - -[![GoCenter Kudos][15]][16] +[![Become my sponsor][15]][16] [1]: https://travis-ci.org/imdario/mergo.png [2]: https://travis-ci.org/imdario/mergo @@ -25,8 +24,8 @@ [12]: https://sourcegraph.com/github.com/imdario/mergo?badge [13]: https://app.fossa.io/api/projects/git%2Bgithub.com%2Fimdario%2Fmergo.svg?type=shield [14]: https://app.fossa.io/projects/git%2Bgithub.com%2Fimdario%2Fmergo?ref=badge_shield -[15]: https://search.gocenter.io/api/ui/badge/github.com%2Fimdario%2Fmergo -[16]: https://search.gocenter.io/github.com/imdario/mergo +[15]: https://img.shields.io/github/sponsors/imdario +[16]: https://github.com/sponsors/imdario A helper to merge structs and maps in Golang. Useful for configuration default values, avoiding messy if-statements. @@ -36,11 +35,11 @@ Also a lovely [comune](http://en.wikipedia.org/wiki/Mergo) (municipality) in the ## Status -It is ready for production use. [It is used in several projects by Docker, Google, The Linux Foundation, VMWare, Shopify, etc](https://github.com/imdario/mergo#mergo-in-the-wild). +It is ready for production use. [It is used in several projects by Docker, Google, The Linux Foundation, VMWare, Shopify, Microsoft, etc](https://github.com/imdario/mergo#mergo-in-the-wild). ### Important note -Please keep in mind that a problematic PR broke [0.3.9](//github.com/imdario/mergo/releases/tag/0.3.9). I reverted it in [0.3.10](//github.com/imdario/mergo/releases/tag/0.3.10), and I consider it stable but not bug-free. Also, this version adds suppot for go modules. +Please keep in mind that a problematic PR broke [0.3.9](//github.com/imdario/mergo/releases/tag/0.3.9). 
I reverted it in [0.3.10](//github.com/imdario/mergo/releases/tag/0.3.10), and I consider it stable but not bug-free. Also, this version adds support for go modules. Keep in mind that in [0.3.2](//github.com/imdario/mergo/releases/tag/0.3.2), Mergo changed `Merge()`and `Map()` signatures to support [transformers](#transformers). I added an optional/variadic argument so that it won't break the existing code. @@ -51,12 +50,12 @@ If you were using Mergo before April 6th, 2015, please check your project works If Mergo is useful to you, consider buying me a coffee, a beer, or making a monthly donation to allow me to keep building great free software. :heart_eyes: Buy Me a Coffee at ko-fi.com -[![Beerpay](https://beerpay.io/imdario/mergo/badge.svg)](https://beerpay.io/imdario/mergo) -[![Beerpay](https://beerpay.io/imdario/mergo/make-wish.svg)](https://beerpay.io/imdario/mergo) Donate using Liberapay +Become my sponsor ### Mergo in the wild +- [cli/cli](https://github.com/cli/cli) - [moby/moby](https://github.com/moby/moby) - [kubernetes/kubernetes](https://github.com/kubernetes/kubernetes) - [vmware/dispatch](https://github.com/vmware/dispatch) @@ -98,6 +97,8 @@ If Mergo is useful to you, consider buying me a coffee, a beer, or making a mont - [jnuthong/item_search](https://github.com/jnuthong/item_search) - [bukalapak/snowboard](https://github.com/bukalapak/snowboard) - [containerssh/containerssh](https://github.com/containerssh/containerssh) +- [goreleaser/goreleaser](https://github.com/goreleaser/goreleaser) +- [tjpnz/structbot](https://github.com/tjpnz/structbot) ## Install @@ -168,7 +169,7 @@ func main() { Note: if test are failing due missing package, please execute: - go get gopkg.in/yaml.v2 + go get gopkg.in/yaml.v3 ### Transformers @@ -218,7 +219,6 @@ func main() { } ``` - ## Contact me If I can help you, you have an idea or you are using Mergo in your projects, don't hesitate to drop me a line (or a pull request): [@im_dario](https://twitter.com/im_dario) @@ -227,18 +227,6 @@ If I can help you, you have an idea or you are using Mergo in your projects, don Written by [Dario Castañé](http://dario.im). -## Top Contributors - -[![0](https://sourcerer.io/fame/imdario/imdario/mergo/images/0)](https://sourcerer.io/fame/imdario/imdario/mergo/links/0) -[![1](https://sourcerer.io/fame/imdario/imdario/mergo/images/1)](https://sourcerer.io/fame/imdario/imdario/mergo/links/1) -[![2](https://sourcerer.io/fame/imdario/imdario/mergo/images/2)](https://sourcerer.io/fame/imdario/imdario/mergo/links/2) -[![3](https://sourcerer.io/fame/imdario/imdario/mergo/images/3)](https://sourcerer.io/fame/imdario/imdario/mergo/links/3) -[![4](https://sourcerer.io/fame/imdario/imdario/mergo/images/4)](https://sourcerer.io/fame/imdario/imdario/mergo/links/4) -[![5](https://sourcerer.io/fame/imdario/imdario/mergo/images/5)](https://sourcerer.io/fame/imdario/imdario/mergo/links/5) -[![6](https://sourcerer.io/fame/imdario/imdario/mergo/images/6)](https://sourcerer.io/fame/imdario/imdario/mergo/links/6) -[![7](https://sourcerer.io/fame/imdario/imdario/mergo/images/7)](https://sourcerer.io/fame/imdario/imdario/mergo/links/7) - - ## License [BSD 3-Clause](http://opensource.org/licenses/BSD-3-Clause) license, as [Go language](http://golang.org/LICENSE). 
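The mergo hunks that follow (merge.go and mergo.go) widen `resolveValues` to accept slice destinations and reword `ErrNotSupported` to match. A minimal sketch, not from this patch, of a top-level slice merge that would previously have failed with `ErrNotSupported`; the values are invented, and the resulting contents of `dst` depend on mergo's slice semantics, which this patch does not show, so the example only checks the error:

```go
// Hypothetical sketch: with the mergo.go change below accepting
// reflect.Slice destinations, a top-level slice merge no longer
// returns ErrNotSupported. The concrete merge result is not asserted.
package main

import (
	"fmt"

	"github.com/imdario/mergo"
)

func main() {
	dst := []string{"a"}
	src := []string{"b", "c"}

	// Before v0.3.13 this returned "only structs and maps are supported".
	if err := mergo.Merge(&dst, src); err != nil {
		fmt.Println("merge failed:", err)
		return
	}
	fmt.Println("merged without ErrNotSupported:", dst)
}
```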
diff --git a/vendor/github.com/imdario/mergo/go.mod b/vendor/github.com/imdario/mergo/go.mod index 3d689d93e..6f476d6ff 100644 --- a/vendor/github.com/imdario/mergo/go.mod +++ b/vendor/github.com/imdario/mergo/go.mod @@ -2,4 +2,4 @@ module github.com/imdario/mergo go 1.13 -require gopkg.in/yaml.v2 v2.3.0 +require gopkg.in/yaml.v3 v3.0.0 diff --git a/vendor/github.com/imdario/mergo/go.sum b/vendor/github.com/imdario/mergo/go.sum index 168980da5..223abcb42 100644 --- a/vendor/github.com/imdario/mergo/go.sum +++ b/vendor/github.com/imdario/mergo/go.sum @@ -1,4 +1,4 @@ gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU= -gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v3 v3.0.0 h1:hjy8E9ON/egN1tAYqKb61G10WtihqetD4sz2H+8nIeA= +gopkg.in/yaml.v3 v3.0.0/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/vendor/github.com/imdario/mergo/merge.go b/vendor/github.com/imdario/mergo/merge.go index 8c2a8fcd9..8b4e2f47a 100644 --- a/vendor/github.com/imdario/mergo/merge.go +++ b/vendor/github.com/imdario/mergo/merge.go @@ -79,7 +79,7 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, co visited[h] = &visit{addr, typ, seen} } - if config.Transformers != nil && !isEmptyValue(dst) { + if config.Transformers != nil && !isReflectNil(dst) && dst.IsValid() { if fn := config.Transformers.Transformer(dst.Type()); fn != nil { err = fn(dst, src) return diff --git a/vendor/github.com/imdario/mergo/mergo.go b/vendor/github.com/imdario/mergo/mergo.go index 3cc926c7f..9fe362d47 100644 --- a/vendor/github.com/imdario/mergo/mergo.go +++ b/vendor/github.com/imdario/mergo/mergo.go @@ -17,7 +17,7 @@ import ( var ( ErrNilArguments = errors.New("src and dst must not be nil") ErrDifferentArgumentsTypes = errors.New("src and dst must be of same type") - ErrNotSupported = errors.New("only structs and maps are supported") + ErrNotSupported = errors.New("only structs, maps, and slices are supported") ErrExpectedMapAsDestination = errors.New("dst was expected to be a map") ErrExpectedStructAsDestination = errors.New("dst was expected to be a struct") ErrNonPointerAgument = errors.New("dst must be a pointer") @@ -65,7 +65,7 @@ func resolveValues(dst, src interface{}) (vDst, vSrc reflect.Value, err error) { return } vDst = reflect.ValueOf(dst).Elem() - if vDst.Kind() != reflect.Struct && vDst.Kind() != reflect.Map { + if vDst.Kind() != reflect.Struct && vDst.Kind() != reflect.Map && vDst.Kind() != reflect.Slice { err = ErrNotSupported return } diff --git a/vendor/modules.txt b/vendor/modules.txt index 199274680..c2dafa52a 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -159,7 +159,7 @@ github.com/containers/common/version # github.com/containers/conmon v2.0.20+incompatible ## explicit github.com/containers/conmon/runner/config -# github.com/containers/image/v5 v5.21.2-0.20220520105616-e594853d6471 +# github.com/containers/image/v5 v5.21.2-0.20220617075545-929f14a56f5c ## explicit github.com/containers/image/v5/copy github.com/containers/image/v5/directory @@ -172,9 +172,11 @@ github.com/containers/image/v5/docker/policyconfiguration github.com/containers/image/v5/docker/reference github.com/containers/image/v5/image github.com/containers/image/v5/internal/blobinfocache 
+github.com/containers/image/v5/internal/image github.com/containers/image/v5/internal/imagedestination github.com/containers/image/v5/internal/imagesource github.com/containers/image/v5/internal/iolimits +github.com/containers/image/v5/internal/manifest github.com/containers/image/v5/internal/pkg/platform github.com/containers/image/v5/internal/private github.com/containers/image/v5/internal/putblobdigest @@ -212,7 +214,7 @@ github.com/containers/image/v5/types github.com/containers/image/v5/version # github.com/containers/libtrust v0.0.0-20200511145503-9c3a6c22cd9a github.com/containers/libtrust -# github.com/containers/ocicrypt v1.1.4-0.20220428134531-566b808bdf6f +# github.com/containers/ocicrypt v1.1.5 ## explicit github.com/containers/ocicrypt github.com/containers/ocicrypt/blockcipher @@ -441,7 +443,7 @@ github.com/hashicorp/errwrap # github.com/hashicorp/go-multierror v1.1.1 ## explicit github.com/hashicorp/go-multierror -# github.com/imdario/mergo v0.3.12 +# github.com/imdario/mergo v0.3.13 github.com/imdario/mergo # github.com/inconshreveable/mousetrap v1.0.0 github.com/inconshreveable/mousetrap @@ -644,7 +646,7 @@ github.com/stefanberger/go-pkcs11uri ## explicit github.com/stretchr/testify/assert github.com/stretchr/testify/require -# github.com/sylabs/sif/v2 v2.7.0 +# github.com/sylabs/sif/v2 v2.7.1 github.com/sylabs/sif/v2/pkg/sif # github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 ## explicit -- cgit v1.2.3-54-g00ecf
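The modules.txt hunk above records the bumped vendored versions. For orientation only, a hypothetical excerpt of the require lines a consuming go.mod would pin to match this vendor tree; the versions are copied verbatim from the hunk, while the surrounding entries of the real go.mod are not shown here:

```
// Hypothetical go.mod excerpt; versions mirror the vendor/modules.txt bumps above.
require (
	github.com/containers/image/v5 v5.21.2-0.20220617075545-929f14a56f5c
	github.com/containers/ocicrypt v1.1.5
	github.com/imdario/mergo v0.3.13
	github.com/sylabs/sif/v2 v2.7.1
)
```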