Diffstat (limited to 'vendor/github.com')
-rw-r--r--  vendor/github.com/containers/image/copy/copy.go | 279
-rw-r--r--  vendor/github.com/containers/image/copy/manifest.go | 29
-rw-r--r--  vendor/github.com/containers/image/copy/sign.go | 14
-rw-r--r--  vendor/github.com/containers/image/directory/directory_dest.go | 104
-rw-r--r--  vendor/github.com/containers/image/directory/directory_src.go | 25
-rw-r--r--  vendor/github.com/containers/image/directory/directory_transport.go | 20
-rw-r--r--  vendor/github.com/containers/image/docker/archive/src.go | 5
-rw-r--r--  vendor/github.com/containers/image/docker/archive/transport.go | 9
-rw-r--r--  vendor/github.com/containers/image/docker/daemon/client.go | 69
-rw-r--r--  vendor/github.com/containers/image/docker/daemon/daemon_dest.go | 29
-rw-r--r--  vendor/github.com/containers/image/docker/daemon/daemon_src.go | 13
-rw-r--r--  vendor/github.com/containers/image/docker/daemon/daemon_transport.go | 11
-rw-r--r--  vendor/github.com/containers/image/docker/docker_image.go | 10
-rw-r--r--  vendor/github.com/containers/image/docker/docker_image_dest.go | 8
-rw-r--r--  vendor/github.com/containers/image/docker/docker_image_src.go | 48
-rw-r--r--  vendor/github.com/containers/image/docker/docker_transport.go | 7
-rw-r--r--  vendor/github.com/containers/image/docker/tarfile/dest.go | 11
-rw-r--r--  vendor/github.com/containers/image/docker/tarfile/src.go | 32
-rw-r--r--  vendor/github.com/containers/image/docker/tarfile/types.go | 26
-rw-r--r--  vendor/github.com/containers/image/image/docker_list.go | 54
-rw-r--r--  vendor/github.com/containers/image/image/docker_schema1.go | 226
-rw-r--r--  vendor/github.com/containers/image/image/docker_schema2.go | 122
-rw-r--r--  vendor/github.com/containers/image/image/manifest.go | 27
-rw-r--r--  vendor/github.com/containers/image/image/memory.go | 17
-rw-r--r--  vendor/github.com/containers/image/image/oci.go | 88
-rw-r--r--  vendor/github.com/containers/image/image/sourced.go | 54
-rw-r--r--  vendor/github.com/containers/image/image/unparsed.go | 69
-rw-r--r--  vendor/github.com/containers/image/internal/tmpdir/tmpdir.go | 19
-rw-r--r--  vendor/github.com/containers/image/manifest/docker_schema1.go | 212
-rw-r--r--  vendor/github.com/containers/image/manifest/docker_schema2.go | 241
-rw-r--r--  vendor/github.com/containers/image/manifest/manifest.go | 76
-rw-r--r--  vendor/github.com/containers/image/manifest/oci.go | 108
-rw-r--r--  vendor/github.com/containers/image/oci/archive/oci_src.go | 26
-rw-r--r--  vendor/github.com/containers/image/oci/archive/oci_transport.go | 73
-rw-r--r--  vendor/github.com/containers/image/oci/internal/oci_util.go | 126
-rw-r--r--  vendor/github.com/containers/image/oci/layout/oci_dest.go | 52
-rw-r--r--  vendor/github.com/containers/image/oci/layout/oci_src.go | 65
-rw-r--r--  vendor/github.com/containers/image/oci/layout/oci_transport.go | 77
-rw-r--r--  vendor/github.com/containers/image/openshift/openshift.go | 36
-rw-r--r--  vendor/github.com/containers/image/openshift/openshift_transport.go | 9
-rw-r--r--  vendor/github.com/containers/image/ostree/ostree_dest.go | 143
-rw-r--r--  vendor/github.com/containers/image/ostree/ostree_src.go | 349
-rw-r--r--  vendor/github.com/containers/image/ostree/ostree_transport.go | 41
-rw-r--r--  vendor/github.com/containers/image/signature/policy_config.go | 6
-rw-r--r--  vendor/github.com/containers/image/storage/storage_image.go | 1036
-rw-r--r--  vendor/github.com/containers/image/storage/storage_reference.go | 40
-rw-r--r--  vendor/github.com/containers/image/storage/storage_transport.go | 179
-rw-r--r--  vendor/github.com/containers/image/tarball/tarball_reference.go | 9
-rw-r--r--  vendor/github.com/containers/image/tarball/tarball_src.go | 22
-rw-r--r--  vendor/github.com/containers/image/transports/alltransports/storage_stub.go | 2
-rw-r--r--  vendor/github.com/containers/image/types/types.go | 78
-rw-r--r--  vendor/github.com/containers/image/vendor.conf | 7
52 files changed, 2258 insertions, 2180 deletions
diff --git a/vendor/github.com/containers/image/copy/copy.go b/vendor/github.com/containers/image/copy/copy.go
index 0380bf72d..ac97cad95 100644
--- a/vendor/github.com/containers/image/copy/copy.go
+++ b/vendor/github.com/containers/image/copy/copy.go
@@ -12,8 +12,6 @@ import (
"strings"
"time"
- pb "gopkg.in/cheggaaa/pb.v1"
-
"github.com/containers/image/image"
"github.com/containers/image/pkg/compression"
"github.com/containers/image/signature"
@@ -22,6 +20,7 @@ import (
"github.com/opencontainers/go-digest"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
+ pb "gopkg.in/cheggaaa/pb.v1"
)
type digestingReader struct {
@@ -31,23 +30,6 @@ type digestingReader struct {
validationFailed bool
}
-// imageCopier allows us to keep track of diffID values for blobs, and other
-// data, that we're copying between images, and cache other information that
-// might allow us to take some shortcuts
-type imageCopier struct {
- copiedBlobs map[digest.Digest]digest.Digest
- cachedDiffIDs map[digest.Digest]digest.Digest
- manifestUpdates *types.ManifestUpdateOptions
- dest types.ImageDestination
- src types.Image
- rawSource types.ImageSource
- diffIDsAreNeeded bool
- canModifyManifest bool
- reportWriter io.Writer
- progressInterval time.Duration
- progress chan types.ProgressProperties
-}
-
// newDigestingReader returns an io.Reader implementation with contents of source, which will eventually return a non-EOF error
// and set validationFailed to true if the source stream does not match expectedDigest.
func newDigestingReader(source io.Reader, expectedDigest digest.Digest) (*digestingReader, error) {
@@ -86,6 +68,27 @@ func (d *digestingReader) Read(p []byte) (int, error) {
return n, err
}
+// copier allows us to keep track of diffID values for blobs, and other
+// data shared across one or more images in a possible manifest list.
+type copier struct {
+ copiedBlobs map[digest.Digest]digest.Digest
+ cachedDiffIDs map[digest.Digest]digest.Digest
+ dest types.ImageDestination
+ rawSource types.ImageSource
+ reportWriter io.Writer
+ progressInterval time.Duration
+ progress chan types.ProgressProperties
+}
+
+// imageCopier tracks state specific to a single image (possibly an item of a manifest list)
+type imageCopier struct {
+ c *copier
+ manifestUpdates *types.ManifestUpdateOptions
+ src types.Image
+ diffIDsAreNeeded bool
+ canModifyManifest bool
+}
+
// Options allows supplying non-default configuration modifying the behavior of CopyImage.
type Options struct {
RemoveSignatures bool // Remove any pre-existing signatures. SignBy will still add a new signature.
@@ -95,6 +98,8 @@ type Options struct {
DestinationCtx *types.SystemContext
ProgressInterval time.Duration // time to wait between reports to signal the progress channel
Progress chan types.ProgressProperties // Reported to when ProgressInterval has arrived for a single artifact+offset.
+ // manifest MIME type of image set by user. "" is default and means use autodetection to determine the manifest MIME type
+ ForceManifestMIMEType string
}
// Image copies image from srcRef to destRef, using policyContext to validate
@@ -115,10 +120,6 @@ func Image(policyContext *signature.PolicyContext, destRef, srcRef types.ImageRe
reportWriter = options.ReportWriter
}
- writeReport := func(f string, a ...interface{}) {
- fmt.Fprintf(reportWriter, f, a...)
- }
-
dest, err := destRef.NewImageDestination(options.DestinationCtx)
if err != nil {
return errors.Wrapf(err, "Error initializing destination %s", transports.ImageName(destRef))
@@ -133,43 +134,89 @@ func Image(policyContext *signature.PolicyContext, destRef, srcRef types.ImageRe
if err != nil {
return errors.Wrapf(err, "Error initializing source %s", transports.ImageName(srcRef))
}
- unparsedImage := image.UnparsedFromSource(rawSource)
defer func() {
- if unparsedImage != nil {
- if err := unparsedImage.Close(); err != nil {
- retErr = errors.Wrapf(retErr, " (unparsed: %v)", err)
- }
+ if err := rawSource.Close(); err != nil {
+ retErr = errors.Wrapf(retErr, " (src: %v)", err)
}
}()
+ c := &copier{
+ copiedBlobs: make(map[digest.Digest]digest.Digest),
+ cachedDiffIDs: make(map[digest.Digest]digest.Digest),
+ dest: dest,
+ rawSource: rawSource,
+ reportWriter: reportWriter,
+ progressInterval: options.ProgressInterval,
+ progress: options.Progress,
+ }
+
+ unparsedToplevel := image.UnparsedInstance(rawSource, nil)
+ multiImage, err := isMultiImage(unparsedToplevel)
+ if err != nil {
+ return errors.Wrapf(err, "Error determining manifest MIME type for %s", transports.ImageName(srcRef))
+ }
+
+ if !multiImage {
+ // The simple case: Just copy a single image.
+ if err := c.copyOneImage(policyContext, options, unparsedToplevel); err != nil {
+ return err
+ }
+ } else {
+ // This is a manifest list. Choose a single image and copy it.
+ // FIXME: Copy to destinations which support manifest lists, one image at a time.
+ instanceDigest, err := image.ChooseManifestInstanceFromManifestList(options.SourceCtx, unparsedToplevel)
+ if err != nil {
+ return errors.Wrapf(err, "Error choosing an image from manifest list %s", transports.ImageName(srcRef))
+ }
+ logrus.Debugf("Source is a manifest list; copying (only) instance %s", instanceDigest)
+ unparsedInstance := image.UnparsedInstance(rawSource, &instanceDigest)
+
+ if err := c.copyOneImage(policyContext, options, unparsedInstance); err != nil {
+ return err
+ }
+ }
+
+ if err := c.dest.Commit(); err != nil {
+ return errors.Wrap(err, "Error committing the finished image")
+ }
+
+ return nil
+}
+
+// copyOneImage copies a single (non-manifest-list) image unparsedImage, using policyContext to validate
+// source image admissibility.
+func (c *copier) copyOneImage(policyContext *signature.PolicyContext, options *Options, unparsedImage *image.UnparsedImage) (retErr error) {
+ // The caller is handling manifest lists; this could happen only if a manifest list contains a manifest list.
+ // Make sure we fail cleanly in such cases.
+ multiImage, err := isMultiImage(unparsedImage)
+ if err != nil {
+ // FIXME FIXME: How to name a reference for the sub-image?
+ return errors.Wrapf(err, "Error determining manifest MIME type for %s", transports.ImageName(unparsedImage.Reference()))
+ }
+ if multiImage {
+ return fmt.Errorf("Unexpectedly received a manifest list instead of a manifest for a single image")
+ }
+
// Please keep this policy check BEFORE reading any other information about the image.
+ // (the multiImage check above only matches the MIME type, which we have received anyway.
+ // Actual parsing of anything should be deferred.)
if allowed, err := policyContext.IsRunningImageAllowed(unparsedImage); !allowed || err != nil { // Be paranoid and fail if either return value indicates so.
return errors.Wrap(err, "Source image rejected")
}
- src, err := image.FromUnparsedImage(unparsedImage)
+ src, err := image.FromUnparsedImage(options.SourceCtx, unparsedImage)
if err != nil {
- return errors.Wrapf(err, "Error initializing image from source %s", transports.ImageName(srcRef))
+ return errors.Wrapf(err, "Error initializing image from source %s", transports.ImageName(c.rawSource.Reference()))
}
- unparsedImage = nil
- defer func() {
- if err := src.Close(); err != nil {
- retErr = errors.Wrapf(retErr, " (source: %v)", err)
- }
- }()
- if err := checkImageDestinationForCurrentRuntimeOS(src, dest); err != nil {
+ if err := checkImageDestinationForCurrentRuntimeOS(options.DestinationCtx, src, c.dest); err != nil {
return err
}
- if src.IsMultiImage() {
- return errors.Errorf("can not copy %s: manifest contains multiple images", transports.ImageName(srcRef))
- }
-
var sigs [][]byte
if options.RemoveSignatures {
sigs = [][]byte{}
} else {
- writeReport("Getting image source signatures\n")
+ c.Printf("Getting image source signatures\n")
s, err := src.Signatures(context.TODO())
if err != nil {
return errors.Wrap(err, "Error reading signatures")
@@ -177,41 +224,33 @@ func Image(policyContext *signature.PolicyContext, destRef, srcRef types.ImageRe
sigs = s
}
if len(sigs) != 0 {
- writeReport("Checking if image destination supports signatures\n")
- if err := dest.SupportsSignatures(); err != nil {
+ c.Printf("Checking if image destination supports signatures\n")
+ if err := c.dest.SupportsSignatures(); err != nil {
return errors.Wrap(err, "Can not copy signatures")
}
}
- canModifyManifest := len(sigs) == 0
- manifestUpdates := types.ManifestUpdateOptions{}
- manifestUpdates.InformationOnly.Destination = dest
+ ic := imageCopier{
+ c: c,
+ manifestUpdates: &types.ManifestUpdateOptions{InformationOnly: types.ManifestUpdateInformation{Destination: c.dest}},
+ src: src,
+ // diffIDsAreNeeded is computed later
+ canModifyManifest: len(sigs) == 0,
+ }
- if err := updateEmbeddedDockerReference(&manifestUpdates, dest, src, canModifyManifest); err != nil {
+ if err := ic.updateEmbeddedDockerReference(); err != nil {
return err
}
// We compute preferredManifestMIMEType only to show it in error messages.
// Without having to add this context in an error message, we would be happy enough to know only that no conversion is needed.
- preferredManifestMIMEType, otherManifestMIMETypeCandidates, err := determineManifestConversion(&manifestUpdates, src, dest.SupportedManifestMIMETypes(), canModifyManifest)
+ preferredManifestMIMEType, otherManifestMIMETypeCandidates, err := ic.determineManifestConversion(c.dest.SupportedManifestMIMETypes(), options.ForceManifestMIMEType)
if err != nil {
return err
}
- // If src.UpdatedImageNeedsLayerDiffIDs(manifestUpdates) will be true, it needs to be true by the time we get here.
- ic := imageCopier{
- copiedBlobs: make(map[digest.Digest]digest.Digest),
- cachedDiffIDs: make(map[digest.Digest]digest.Digest),
- manifestUpdates: &manifestUpdates,
- dest: dest,
- src: src,
- rawSource: rawSource,
- diffIDsAreNeeded: src.UpdatedImageNeedsLayerDiffIDs(manifestUpdates),
- canModifyManifest: canModifyManifest,
- reportWriter: reportWriter,
- progressInterval: options.ProgressInterval,
- progress: options.Progress,
- }
+ // If src.UpdatedImageNeedsLayerDiffIDs(ic.manifestUpdates) will be true, it needs to be true by the time we get here.
+ ic.diffIDsAreNeeded = src.UpdatedImageNeedsLayerDiffIDs(*ic.manifestUpdates)
if err := ic.copyLayers(); err != nil {
return err
@@ -233,9 +272,9 @@ func Image(policyContext *signature.PolicyContext, destRef, srcRef types.ImageRe
}
// If the original MIME type is acceptable, determineManifestConversion always uses it as preferredManifestMIMEType.
// So if we are here, we will definitely be trying to convert the manifest.
- // With !canModifyManifest, that would just be a string of repeated failures for the same reason,
+ // With !ic.canModifyManifest, that would just be a string of repeated failures for the same reason,
// so let’s bail out early and with a better error message.
- if !canModifyManifest {
+ if !ic.canModifyManifest {
return errors.Wrap(err, "Writing manifest failed (and converting it is not possible)")
}
@@ -243,7 +282,7 @@ func Image(policyContext *signature.PolicyContext, destRef, srcRef types.ImageRe
errs := []string{fmt.Sprintf("%s(%v)", preferredManifestMIMEType, err)}
for _, manifestMIMEType := range otherManifestMIMETypeCandidates {
logrus.Debugf("Trying to use manifest type %s…", manifestMIMEType)
- manifestUpdates.ManifestMIMEType = manifestMIMEType
+ ic.manifestUpdates.ManifestMIMEType = manifestMIMEType
attemptedManifest, err := ic.copyUpdatedConfigAndManifest()
if err != nil {
logrus.Debugf("Upload of manifest type %s failed: %v", manifestMIMEType, err)
@@ -262,35 +301,44 @@ func Image(policyContext *signature.PolicyContext, destRef, srcRef types.ImageRe
}
if options.SignBy != "" {
- newSig, err := createSignature(dest, manifest, options.SignBy, reportWriter)
+ newSig, err := c.createSignature(manifest, options.SignBy)
if err != nil {
return err
}
sigs = append(sigs, newSig)
}
- writeReport("Storing signatures\n")
- if err := dest.PutSignatures(sigs); err != nil {
+ c.Printf("Storing signatures\n")
+ if err := c.dest.PutSignatures(sigs); err != nil {
return errors.Wrap(err, "Error writing signatures")
}
- if err := dest.Commit(); err != nil {
- return errors.Wrap(err, "Error committing the finished image")
- }
-
return nil
}
-func checkImageDestinationForCurrentRuntimeOS(src types.Image, dest types.ImageDestination) error {
+// Printf writes a formatted string to c.reportWriter.
+// Note that the method name Printf is not entirely arbitrary: (go tool vet)
+// has a built-in list of functions/methods (whatever object they are for)
+// which have their format strings checked; for other names we would have
+// to pass a parameter to every (go tool vet) invocation.
+func (c *copier) Printf(format string, a ...interface{}) {
+ fmt.Fprintf(c.reportWriter, format, a...)
+}
+
+func checkImageDestinationForCurrentRuntimeOS(ctx *types.SystemContext, src types.Image, dest types.ImageDestination) error {
if dest.MustMatchRuntimeOS() {
+ wantedOS := runtime.GOOS
+ if ctx != nil && ctx.OSChoice != "" {
+ wantedOS = ctx.OSChoice
+ }
c, err := src.OCIConfig()
if err != nil {
return errors.Wrapf(err, "Error parsing image configuration")
}
- osErr := fmt.Errorf("image operating system %q cannot be used on %q", c.OS, runtime.GOOS)
- if runtime.GOOS == "windows" && c.OS == "linux" {
+ osErr := fmt.Errorf("image operating system %q cannot be used on %q", c.OS, wantedOS)
+ if wantedOS == "windows" && c.OS == "linux" {
return osErr
- } else if runtime.GOOS != "windows" && c.OS == "windows" {
+ } else if wantedOS != "windows" && c.OS == "windows" {
return osErr
}
}
@@ -298,44 +346,35 @@ func checkImageDestinationForCurrentRuntimeOS(src types.Image, dest types.ImageD
}
// updateEmbeddedDockerReference handles the Docker reference embedded in Docker schema1 manifests.
-func updateEmbeddedDockerReference(manifestUpdates *types.ManifestUpdateOptions, dest types.ImageDestination, src types.Image, canModifyManifest bool) error {
- destRef := dest.Reference().DockerReference()
+func (ic *imageCopier) updateEmbeddedDockerReference() error {
+ destRef := ic.c.dest.Reference().DockerReference()
if destRef == nil {
return nil // Destination does not care about Docker references
}
- if !src.EmbeddedDockerReferenceConflicts(destRef) {
+ if !ic.src.EmbeddedDockerReferenceConflicts(destRef) {
return nil // No reference embedded in the manifest, or it matches destRef already.
}
- if !canModifyManifest {
+ if !ic.canModifyManifest {
return errors.Errorf("Copying a schema1 image with an embedded Docker reference to %s (Docker reference %s) would invalidate existing signatures. Explicitly enable signature removal to proceed anyway",
- transports.ImageName(dest.Reference()), destRef.String())
+ transports.ImageName(ic.c.dest.Reference()), destRef.String())
}
- manifestUpdates.EmbeddedDockerReference = destRef
+ ic.manifestUpdates.EmbeddedDockerReference = destRef
return nil
}
-// copyLayers copies layers from src/rawSource to dest, using and updating ic.manifestUpdates if necessary and ic.canModifyManifest.
+// copyLayers copies layers from ic.src/ic.c.rawSource to dest, using and updating ic.manifestUpdates if necessary and ic.canModifyManifest.
func (ic *imageCopier) copyLayers() error {
srcInfos := ic.src.LayerInfos()
destInfos := []types.BlobInfo{}
diffIDs := []digest.Digest{}
- updatedSrcInfos := ic.src.UpdatedLayerInfos()
- srcInfosUpdated := false
- if updatedSrcInfos != nil && !reflect.DeepEqual(srcInfos, updatedSrcInfos) {
- if !ic.canModifyManifest {
- return errors.Errorf("Internal error: copyLayers() needs to use an updated manifest but that was known to be forbidden")
- }
- srcInfos = updatedSrcInfos
- srcInfosUpdated = true
- }
for _, srcLayer := range srcInfos {
var (
destInfo types.BlobInfo
diffID digest.Digest
err error
)
- if ic.dest.AcceptsForeignLayerURLs() && len(srcLayer.URLs) != 0 {
+ if ic.c.dest.AcceptsForeignLayerURLs() && len(srcLayer.URLs) != 0 {
// DiffIDs are, currently, needed only when converting from schema1.
// In which case src.LayerInfos will not have URLs because schema1
// does not support them.
@@ -343,7 +382,7 @@ func (ic *imageCopier) copyLayers() error {
return errors.New("getting DiffID for foreign layers is unimplemented")
}
destInfo = srcLayer
- fmt.Fprintf(ic.reportWriter, "Skipping foreign layer %q copy to %s\n", destInfo.Digest, ic.dest.Reference().Transport().Name())
+ ic.c.Printf("Skipping foreign layer %q copy to %s\n", destInfo.Digest, ic.c.dest.Reference().Transport().Name())
} else {
destInfo, diffID, err = ic.copyLayer(srcLayer)
if err != nil {
@@ -357,7 +396,7 @@ func (ic *imageCopier) copyLayers() error {
if ic.diffIDsAreNeeded {
ic.manifestUpdates.InformationOnly.LayerDiffIDs = diffIDs
}
- if srcInfosUpdated || layerDigestsDiffer(srcInfos, destInfos) {
+ if layerDigestsDiffer(srcInfos, destInfos) {
ic.manifestUpdates.LayerInfos = destInfos
}
return nil
@@ -388,7 +427,7 @@ func (ic *imageCopier) copyUpdatedConfigAndManifest() ([]byte, error) {
// We have set ic.diffIDsAreNeeded based on the preferred MIME type returned by determineManifestConversion.
// So, this can only happen if we are trying to upload using one of the other MIME type candidates.
// Because UpdatedImageNeedsLayerDiffIDs is true only when converting from s1 to s2, this case should only arise
- // when ic.dest.SupportedManifestMIMETypes() includes both s1 and s2, the upload using s1 failed, and we are now trying s2.
+ // when ic.c.dest.SupportedManifestMIMETypes() includes both s1 and s2, the upload using s1 failed, and we are now trying s2.
// Supposedly s2-only registries do not exist or are extremely rare, so failing with this error message is good enough for now.
// If handling such registries turns out to be necessary, we could compute ic.diffIDsAreNeeded based on the full list of manifest MIME type candidates.
return nil, errors.Errorf("Can not convert image to %s, preparing DiffIDs for this case is not supported", ic.manifestUpdates.ManifestMIMEType)
@@ -404,27 +443,27 @@ func (ic *imageCopier) copyUpdatedConfigAndManifest() ([]byte, error) {
return nil, errors.Wrap(err, "Error reading manifest")
}
- if err := ic.copyConfig(pendingImage); err != nil {
+ if err := ic.c.copyConfig(pendingImage); err != nil {
return nil, err
}
- fmt.Fprintf(ic.reportWriter, "Writing manifest to image destination\n")
- if err := ic.dest.PutManifest(manifest); err != nil {
+ ic.c.Printf("Writing manifest to image destination\n")
+ if err := ic.c.dest.PutManifest(manifest); err != nil {
return nil, errors.Wrap(err, "Error writing manifest")
}
return manifest, nil
}
// copyConfig copies config.json, if any, from src to dest.
-func (ic *imageCopier) copyConfig(src types.Image) error {
+func (c *copier) copyConfig(src types.Image) error {
srcInfo := src.ConfigInfo()
if srcInfo.Digest != "" {
- fmt.Fprintf(ic.reportWriter, "Copying config %s\n", srcInfo.Digest)
+ c.Printf("Copying config %s\n", srcInfo.Digest)
configBlob, err := src.ConfigBlob()
if err != nil {
return errors.Wrapf(err, "Error reading config blob %s", srcInfo.Digest)
}
- destInfo, err := ic.copyBlobFromStream(bytes.NewReader(configBlob), srcInfo, nil, false)
+ destInfo, err := c.copyBlobFromStream(bytes.NewReader(configBlob), srcInfo, nil, false)
if err != nil {
return err
}
@@ -446,12 +485,12 @@ type diffIDResult struct {
// and returns a complete blobInfo of the copied layer, and a value for LayerDiffIDs if diffIDIsNeeded
func (ic *imageCopier) copyLayer(srcInfo types.BlobInfo) (types.BlobInfo, digest.Digest, error) {
// Check if we already have a blob with this digest
- haveBlob, extantBlobSize, err := ic.dest.HasBlob(srcInfo)
+ haveBlob, extantBlobSize, err := ic.c.dest.HasBlob(srcInfo)
if err != nil {
return types.BlobInfo{}, "", errors.Wrapf(err, "Error checking for blob %s at destination", srcInfo.Digest)
}
// If we already have a cached diffID for this blob, we don't need to compute it
- diffIDIsNeeded := ic.diffIDsAreNeeded && (ic.cachedDiffIDs[srcInfo.Digest] == "")
+ diffIDIsNeeded := ic.diffIDsAreNeeded && (ic.c.cachedDiffIDs[srcInfo.Digest] == "")
// If we already have the blob, and we don't need to recompute the diffID, then we might be able to avoid reading it again
if haveBlob && !diffIDIsNeeded {
// Check the blob sizes match, if we were given a size this time
@@ -460,17 +499,17 @@ func (ic *imageCopier) copyLayer(srcInfo types.BlobInfo) (types.BlobInfo, digest
}
srcInfo.Size = extantBlobSize
// Tell the image destination that this blob's delta is being applied again. For some image destinations, this can be faster than using GetBlob/PutBlob
- blobinfo, err := ic.dest.ReapplyBlob(srcInfo)
+ blobinfo, err := ic.c.dest.ReapplyBlob(srcInfo)
if err != nil {
return types.BlobInfo{}, "", errors.Wrapf(err, "Error reapplying blob %s at destination", srcInfo.Digest)
}
- fmt.Fprintf(ic.reportWriter, "Skipping fetch of repeat blob %s\n", srcInfo.Digest)
- return blobinfo, ic.cachedDiffIDs[srcInfo.Digest], err
+ ic.c.Printf("Skipping fetch of repeat blob %s\n", srcInfo.Digest)
+ return blobinfo, ic.c.cachedDiffIDs[srcInfo.Digest], err
}
// Fallback: copy the layer, computing the diffID if we need to do so
- fmt.Fprintf(ic.reportWriter, "Copying blob %s\n", srcInfo.Digest)
- srcStream, srcBlobSize, err := ic.rawSource.GetBlob(srcInfo)
+ ic.c.Printf("Copying blob %s\n", srcInfo.Digest)
+ srcStream, srcBlobSize, err := ic.c.rawSource.GetBlob(srcInfo)
if err != nil {
return types.BlobInfo{}, "", errors.Wrapf(err, "Error reading blob %s", srcInfo.Digest)
}
@@ -488,7 +527,7 @@ func (ic *imageCopier) copyLayer(srcInfo types.BlobInfo) (types.BlobInfo, digest
return types.BlobInfo{}, "", errors.Wrap(diffIDResult.err, "Error computing layer DiffID")
}
logrus.Debugf("Computed DiffID %s for layer %s", diffIDResult.digest, srcInfo.Digest)
- ic.cachedDiffIDs[srcInfo.Digest] = diffIDResult.digest
+ ic.c.cachedDiffIDs[srcInfo.Digest] = diffIDResult.digest
}
return blobInfo, diffIDResult.digest, nil
}
@@ -522,7 +561,7 @@ func (ic *imageCopier) copyLayerFromStream(srcStream io.Reader, srcInfo types.Bl
return pipeWriter
}
}
- blobInfo, err := ic.copyBlobFromStream(srcStream, srcInfo, getDiffIDRecorder, ic.canModifyManifest) // Sets err to nil on success
+ blobInfo, err := ic.c.copyBlobFromStream(srcStream, srcInfo, getDiffIDRecorder, ic.canModifyManifest) // Sets err to nil on success
return blobInfo, diffIDChan, err
// We need the defer … pipeWriter.CloseWithError() to happen HERE so that the caller can block on reading from diffIDChan
}
@@ -556,7 +595,7 @@ func computeDiffID(stream io.Reader, decompressor compression.DecompressorFunc)
// perhaps sending a copy to an io.Writer if getOriginalLayerCopyWriter != nil,
// perhaps compressing it if canCompress,
// and returns a complete blobInfo of the copied blob.
-func (ic *imageCopier) copyBlobFromStream(srcStream io.Reader, srcInfo types.BlobInfo,
+func (c *copier) copyBlobFromStream(srcStream io.Reader, srcInfo types.BlobInfo,
getOriginalLayerCopyWriter func(decompressor compression.DecompressorFunc) io.Writer,
canCompress bool) (types.BlobInfo, error) {
// The copying happens through a pipeline of connected io.Readers.
@@ -584,7 +623,7 @@ func (ic *imageCopier) copyBlobFromStream(srcStream io.Reader, srcInfo types.Blo
// === Report progress using a pb.Reader.
bar := pb.New(int(srcInfo.Size)).SetUnits(pb.U_BYTES)
- bar.Output = ic.reportWriter
+ bar.Output = c.reportWriter
bar.SetMaxWidth(80)
bar.ShowTimeLeft = false
bar.ShowPercent = false
@@ -601,7 +640,7 @@ func (ic *imageCopier) copyBlobFromStream(srcStream io.Reader, srcInfo types.Blo
// === Compress the layer if it is uncompressed and compression is desired
var inputInfo types.BlobInfo
- if !canCompress || isCompressed || !ic.dest.ShouldCompressLayers() {
+ if !canCompress || isCompressed || !c.dest.ShouldCompressLayers() {
logrus.Debugf("Using original blob without modification")
inputInfo = srcInfo
} else {
@@ -618,19 +657,19 @@ func (ic *imageCopier) copyBlobFromStream(srcStream io.Reader, srcInfo types.Blo
inputInfo.Size = -1
}
- // === Report progress using the ic.progress channel, if required.
- if ic.progress != nil && ic.progressInterval > 0 {
+ // === Report progress using the c.progress channel, if required.
+ if c.progress != nil && c.progressInterval > 0 {
destStream = &progressReader{
source: destStream,
- channel: ic.progress,
- interval: ic.progressInterval,
+ channel: c.progress,
+ interval: c.progressInterval,
artifact: srcInfo,
lastTime: time.Now(),
}
}
// === Finally, send the layer stream to dest.
- uploadedInfo, err := ic.dest.PutBlob(destStream, inputInfo)
+ uploadedInfo, err := c.dest.PutBlob(destStream, inputInfo)
if err != nil {
return types.BlobInfo{}, errors.Wrap(err, "Error writing blob")
}
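For context, a minimal sketch of how a caller might exercise the reworked copy pipeline and the new ForceManifestMIMEType option; the image references, the permissive policy and the chosen MIME type are illustrative assumptions, not part of this change:

package main

import (
	"os"

	"github.com/containers/image/copy"
	"github.com/containers/image/manifest"
	"github.com/containers/image/signature"
	"github.com/containers/image/transports/alltransports"
)

func main() {
	// Hypothetical source and destination references.
	srcRef, err := alltransports.ParseImageName("docker://busybox:latest")
	if err != nil {
		panic(err)
	}
	destRef, err := alltransports.ParseImageName("dir:/tmp/busybox")
	if err != nil {
		panic(err)
	}

	// A permissive policy, purely for illustration.
	policy := &signature.Policy{Default: signature.PolicyRequirements{signature.NewPRInsecureAcceptAnything()}}
	policyContext, err := signature.NewPolicyContext(policy)
	if err != nil {
		panic(err)
	}
	defer policyContext.Destroy()

	// If the source is a manifest list, copy.Image now selects a single instance;
	// ForceManifestMIMEType ("" by default) restricts conversion to one MIME type.
	if err := copy.Image(policyContext, destRef, srcRef, &copy.Options{
		ReportWriter:          os.Stdout,
		ForceManifestMIMEType: manifest.DockerV2Schema2MediaType,
	}); err != nil {
		panic(err)
	}
}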
diff --git a/vendor/github.com/containers/image/copy/manifest.go b/vendor/github.com/containers/image/copy/manifest.go
index e3b294dd1..7e4cd10ef 100644
--- a/vendor/github.com/containers/image/copy/manifest.go
+++ b/vendor/github.com/containers/image/copy/manifest.go
@@ -37,16 +37,20 @@ func (os *orderedSet) append(s string) {
}
}
-// determineManifestConversion updates manifestUpdates to convert manifest to a supported MIME type, if necessary and canModifyManifest.
-// Note that the conversion will only happen later, through src.UpdatedImage
+// determineManifestConversion updates ic.manifestUpdates to convert manifest to a supported MIME type, if necessary and ic.canModifyManifest.
+// Note that the conversion will only happen later, through ic.src.UpdatedImage
// Returns the preferred manifest MIME type (whether we are converting to it or using it unmodified),
// and a list of other possible alternatives, in order.
-func determineManifestConversion(manifestUpdates *types.ManifestUpdateOptions, src types.Image, destSupportedManifestMIMETypes []string, canModifyManifest bool) (string, []string, error) {
- _, srcType, err := src.Manifest()
+func (ic *imageCopier) determineManifestConversion(destSupportedManifestMIMETypes []string, forceManifestMIMEType string) (string, []string, error) {
+ _, srcType, err := ic.src.Manifest()
if err != nil { // This should have been cached?!
return "", nil, errors.Wrap(err, "Error reading manifest")
}
+ if forceManifestMIMEType != "" {
+ destSupportedManifestMIMETypes = []string{forceManifestMIMEType}
+ }
+
if len(destSupportedManifestMIMETypes) == 0 {
return srcType, []string{}, nil // Anything goes; just use the original as is, do not try any conversions.
}
@@ -67,10 +71,10 @@ func determineManifestConversion(manifestUpdates *types.ManifestUpdateOptions, s
if _, ok := supportedByDest[srcType]; ok {
prioritizedTypes.append(srcType)
}
- if !canModifyManifest {
- // We could also drop the !canModifyManifest parameter and have the caller
+ if !ic.canModifyManifest {
+ // We could also drop the !ic.canModifyManifest check and have the caller
// make the choice; it is already doing that to an extent, to improve error
- // messages. But it is nice to hide the “if !canModifyManifest, do no conversion”
+ // messages. But it is nice to hide the “if !ic.canModifyManifest, do no conversion”
// special case in here; the caller can then worry (or not) only about a good UI.
logrus.Debugf("We can't modify the manifest, hoping for the best...")
return srcType, []string{}, nil // Take our chances - FIXME? Or should we fail without trying?
@@ -94,9 +98,18 @@ func determineManifestConversion(manifestUpdates *types.ManifestUpdateOptions, s
}
preferredType := prioritizedTypes.list[0]
if preferredType != srcType {
- manifestUpdates.ManifestMIMEType = preferredType
+ ic.manifestUpdates.ManifestMIMEType = preferredType
} else {
logrus.Debugf("... will first try using the original manifest unmodified")
}
return preferredType, prioritizedTypes.list[1:], nil
}
+
+// isMultiImage returns true if img is a list of images
+func isMultiImage(img types.UnparsedImage) (bool, error) {
+ _, mt, err := img.Manifest()
+ if err != nil {
+ return false, err
+ }
+ return manifest.MIMETypeIsMultiImage(mt), nil
+}
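The isMultiImage helper above is unexported; a rough equivalent for callers outside the copy package, built only from the exported manifest helpers (the file path is hypothetical):

package main

import (
	"fmt"
	"io/ioutil"

	"github.com/containers/image/manifest"
)

// isManifestList mirrors the unexported isMultiImage check for a raw manifest read from disk.
func isManifestList(path string) (bool, error) {
	raw, err := ioutil.ReadFile(path)
	if err != nil {
		return false, err
	}
	return manifest.MIMETypeIsMultiImage(manifest.GuessMIMEType(raw)), nil
}

func main() {
	multi, err := isManifestList("/tmp/busybox/manifest.json") // hypothetical path
	if err != nil {
		panic(err)
	}
	fmt.Println("manifest list:", multi)
}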
diff --git a/vendor/github.com/containers/image/copy/sign.go b/vendor/github.com/containers/image/copy/sign.go
index 9187d70b3..91394d2b0 100644
--- a/vendor/github.com/containers/image/copy/sign.go
+++ b/vendor/github.com/containers/image/copy/sign.go
@@ -1,17 +1,13 @@
package copy
import (
- "fmt"
- "io"
-
"github.com/containers/image/signature"
"github.com/containers/image/transports"
- "github.com/containers/image/types"
"github.com/pkg/errors"
)
-// createSignature creates a new signature of manifest at (identified by) dest using keyIdentity.
-func createSignature(dest types.ImageDestination, manifest []byte, keyIdentity string, reportWriter io.Writer) ([]byte, error) {
+// createSignature creates a new signature of manifest using keyIdentity.
+func (c *copier) createSignature(manifest []byte, keyIdentity string) ([]byte, error) {
mech, err := signature.NewGPGSigningMechanism()
if err != nil {
return nil, errors.Wrap(err, "Error initializing GPG")
@@ -21,12 +17,12 @@ func createSignature(dest types.ImageDestination, manifest []byte, keyIdentity s
return nil, errors.Wrap(err, "Signing not supported")
}
- dockerReference := dest.Reference().DockerReference()
+ dockerReference := c.dest.Reference().DockerReference()
if dockerReference == nil {
- return nil, errors.Errorf("Cannot determine canonical Docker reference for destination %s", transports.ImageName(dest.Reference()))
+ return nil, errors.Errorf("Cannot determine canonical Docker reference for destination %s", transports.ImageName(c.dest.Reference()))
}
- fmt.Fprintf(reportWriter, "Signing manifest\n")
+ c.Printf("Signing manifest\n")
newSig, err := signature.SignDockerManifest(manifest, dockerReference.String(), mech, keyIdentity)
if err != nil {
return nil, errors.Wrap(err, "Error creating signature")
diff --git a/vendor/github.com/containers/image/directory/directory_dest.go b/vendor/github.com/containers/image/directory/directory_dest.go
index ea46a27ed..47d59d9fe 100644
--- a/vendor/github.com/containers/image/directory/directory_dest.go
+++ b/vendor/github.com/containers/image/directory/directory_dest.go
@@ -4,19 +4,77 @@ import (
"io"
"io/ioutil"
"os"
+ "path/filepath"
"github.com/containers/image/types"
"github.com/opencontainers/go-digest"
"github.com/pkg/errors"
+ "github.com/sirupsen/logrus"
)
+const version = "Directory Transport Version: 1.0\n"
+
+// ErrNotContainerImageDir indicates that the directory doesn't match the expected contents of a directory created
+// using the 'dir' transport
+var ErrNotContainerImageDir = errors.New("not a containers image directory, don't want to overwrite important data")
+
type dirImageDestination struct {
- ref dirReference
+ ref dirReference
+ compress bool
}
-// newImageDestination returns an ImageDestination for writing to an existing directory.
-func newImageDestination(ref dirReference) types.ImageDestination {
- return &dirImageDestination{ref}
+// newImageDestination returns an ImageDestination for writing to a directory.
+func newImageDestination(ref dirReference, compress bool) (types.ImageDestination, error) {
+ d := &dirImageDestination{ref: ref, compress: compress}
+
+ // If the directory exists, check whether it is empty
+ // if it is not empty, check whether the contents match those of a container image directory and overwrite them
+ // if the contents don't match, return an error
+ dirExists, err := pathExists(d.ref.resolvedPath)
+ if err != nil {
+ return nil, errors.Wrapf(err, "error checking for path %q", d.ref.resolvedPath)
+ }
+ if dirExists {
+ isEmpty, err := isDirEmpty(d.ref.resolvedPath)
+ if err != nil {
+ return nil, err
+ }
+
+ if !isEmpty {
+ versionExists, err := pathExists(d.ref.versionPath())
+ if err != nil {
+ return nil, errors.Wrapf(err, "error checking if path exists %q", d.ref.versionPath())
+ }
+ if versionExists {
+ contents, err := ioutil.ReadFile(d.ref.versionPath())
+ if err != nil {
+ return nil, err
+ }
+ // check whether the contents of the version file are what we expect
+ if string(contents) != version {
+ return nil, ErrNotContainerImageDir
+ }
+ } else {
+ return nil, ErrNotContainerImageDir
+ }
+ // delete directory contents so that only one image is in the directory at a time
+ if err = removeDirContents(d.ref.resolvedPath); err != nil {
+ return nil, errors.Wrapf(err, "error erasing contents in %q", d.ref.resolvedPath)
+ }
+ logrus.Debugf("overwriting existing container image directory %q", d.ref.resolvedPath)
+ }
+ } else {
+ // create directory if it doesn't exist
+ if err := os.MkdirAll(d.ref.resolvedPath, 0755); err != nil {
+ return nil, errors.Wrapf(err, "unable to create directory %q", d.ref.resolvedPath)
+ }
+ }
+ // create version file
+ err = ioutil.WriteFile(d.ref.versionPath(), []byte(version), 0755)
+ if err != nil {
+ return nil, errors.Wrapf(err, "error creating version file %q", d.ref.versionPath())
+ }
+ return d, nil
}
// Reference returns the reference used to set up this destination. Note that this should directly correspond to user's intent,
@@ -42,7 +100,7 @@ func (d *dirImageDestination) SupportsSignatures() error {
// ShouldCompressLayers returns true iff it is desirable to compress layer blobs written to this destination.
func (d *dirImageDestination) ShouldCompressLayers() bool {
- return false
+ return d.compress
}
// AcceptsForeignLayerURLs returns false iff foreign layers in manifest should be actually
@@ -147,3 +205,39 @@ func (d *dirImageDestination) PutSignatures(signatures [][]byte) error {
func (d *dirImageDestination) Commit() error {
return nil
}
+
+// returns true if path exists
+func pathExists(path string) (bool, error) {
+ _, err := os.Stat(path)
+ if err == nil {
+ return true, nil
+ }
+ if err != nil && os.IsNotExist(err) {
+ return false, nil
+ }
+ return false, err
+}
+
+// returns true if directory is empty
+func isDirEmpty(path string) (bool, error) {
+ files, err := ioutil.ReadDir(path)
+ if err != nil {
+ return false, err
+ }
+ return len(files) == 0, nil
+}
+
+// deletes the contents of a directory
+func removeDirContents(path string) error {
+ files, err := ioutil.ReadDir(path)
+ if err != nil {
+ return err
+ }
+
+ for _, file := range files {
+ if err := os.RemoveAll(filepath.Join(path, file.Name())); err != nil {
+ return err
+ }
+ }
+ return nil
+}
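As a standalone sketch of the guard the "dir:" destination now applies, the same version-file check can be expressed as a small helper (the directory path is hypothetical):

package main

import (
	"fmt"
	"io/ioutil"
	"path/filepath"
)

// dirTransportVersion is the marker written by the directory transport; an existing
// non-empty directory is reused only when its "version" file matches it exactly.
const dirTransportVersion = "Directory Transport Version: 1.0\n"

func looksLikeContainerImageDir(dir string) bool {
	contents, err := ioutil.ReadFile(filepath.Join(dir, "version"))
	return err == nil && string(contents) == dirTransportVersion
}

func main() {
	fmt.Println(looksLikeContainerImageDir("/tmp/busybox")) // hypothetical path
}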
diff --git a/vendor/github.com/containers/image/directory/directory_src.go b/vendor/github.com/containers/image/directory/directory_src.go
index 705e289b4..b362f5422 100644
--- a/vendor/github.com/containers/image/directory/directory_src.go
+++ b/vendor/github.com/containers/image/directory/directory_src.go
@@ -35,7 +35,12 @@ func (s *dirImageSource) Close() error {
// GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available).
// It may use a remote (= slow) service.
-func (s *dirImageSource) GetManifest() ([]byte, string, error) {
+// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve (when the primary manifest is a manifest list);
+// this never happens if the primary manifest is not a manifest list (e.g. if the source never returns manifest lists).
+func (s *dirImageSource) GetManifest(instanceDigest *digest.Digest) ([]byte, string, error) {
+ if instanceDigest != nil {
+ return nil, "", errors.Errorf(`Getting target manifest not supported by "dir:"`)
+ }
m, err := ioutil.ReadFile(s.ref.manifestPath())
if err != nil {
return nil, "", err
@@ -43,10 +48,6 @@ func (s *dirImageSource) GetManifest() ([]byte, string, error) {
return m, manifest.GuessMIMEType(m), err
}
-func (s *dirImageSource) GetTargetManifest(digest digest.Digest) ([]byte, string, error) {
- return nil, "", errors.Errorf(`Getting target manifest not supported by "dir:"`)
-}
-
// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown).
func (s *dirImageSource) GetBlob(info types.BlobInfo) (io.ReadCloser, int64, error) {
r, err := os.Open(s.ref.layerPath(info.Digest))
@@ -60,7 +61,14 @@ func (s *dirImageSource) GetBlob(info types.BlobInfo) (io.ReadCloser, int64, err
return r, fi.Size(), nil
}
-func (s *dirImageSource) GetSignatures(ctx context.Context) ([][]byte, error) {
+// GetSignatures returns the image's signatures. It may use a remote (= slow) service.
+// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve signatures for
+// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list
+// (e.g. if the source never returns manifest lists).
+func (s *dirImageSource) GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) {
+ if instanceDigest != nil {
+ return nil, errors.Errorf(`Manifests lists are not supported by "dir:"`)
+ }
signatures := [][]byte{}
for i := 0; ; i++ {
signature, err := ioutil.ReadFile(s.ref.signaturePath(i))
@@ -74,8 +82,3 @@ func (s *dirImageSource) GetSignatures(ctx context.Context) ([][]byte, error) {
}
return signatures, nil
}
-
-// UpdatedLayerInfos() returns updated layer info that should be used when reading, in preference to values in the manifest, if specified.
-func (s *dirImageSource) UpdatedLayerInfos() []types.BlobInfo {
- return nil
-}
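A brief sketch of the updated ImageSource contract introduced here: a nil instance digest retrieves the primary manifest, while a non-nil digest selects one entry of a manifest list (which, as shown above, "dir:" rejects). The package and function names are assumptions:

package example

import (
	"github.com/containers/image/types"
)

// primaryManifest fetches the top-level manifest from any types.ImageSource under the
// new GetManifest(instanceDigest) signature; pass a *digest.Digest instead of nil to
// pick a single instance when the primary manifest is a manifest list.
func primaryManifest(src types.ImageSource) ([]byte, string, error) {
	return src.GetManifest(nil)
}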
diff --git a/vendor/github.com/containers/image/directory/directory_transport.go b/vendor/github.com/containers/image/directory/directory_transport.go
index b9ce01a2e..c38753087 100644
--- a/vendor/github.com/containers/image/directory/directory_transport.go
+++ b/vendor/github.com/containers/image/directory/directory_transport.go
@@ -134,13 +134,14 @@ func (ref dirReference) PolicyConfigurationNamespaces() []string {
return res
}
-// NewImage returns a types.Image for this reference, possibly specialized for this ImageTransport.
-// The caller must call .Close() on the returned Image.
+// NewImage returns a types.ImageCloser for this reference, possibly specialized for this ImageTransport.
+// The caller must call .Close() on the returned ImageCloser.
// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource,
// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage.
-func (ref dirReference) NewImage(ctx *types.SystemContext) (types.Image, error) {
+// WARNING: This may not do the right thing for a manifest list, see image.FromSource for details.
+func (ref dirReference) NewImage(ctx *types.SystemContext) (types.ImageCloser, error) {
src := newImageSource(ref)
- return image.FromSource(src)
+ return image.FromSource(ctx, src)
}
// NewImageSource returns a types.ImageSource for this reference.
@@ -152,7 +153,11 @@ func (ref dirReference) NewImageSource(ctx *types.SystemContext) (types.ImageSou
// NewImageDestination returns a types.ImageDestination for this reference.
// The caller must call .Close() on the returned ImageDestination.
func (ref dirReference) NewImageDestination(ctx *types.SystemContext) (types.ImageDestination, error) {
- return newImageDestination(ref), nil
+ compress := false
+ if ctx != nil {
+ compress = ctx.DirForceCompress
+ }
+ return newImageDestination(ref, compress)
}
// DeleteImage deletes the named image from the registry, if supported.
@@ -175,3 +180,8 @@ func (ref dirReference) layerPath(digest digest.Digest) string {
func (ref dirReference) signaturePath(index int) string {
return filepath.Join(ref.path, fmt.Sprintf("signature-%d", index+1))
}
+
+// versionPath returns a path for the version file within a directory using our conventions.
+func (ref dirReference) versionPath() string {
+ return filepath.Join(ref.path, "version")
+}
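A short sketch of the NewImage contract change above: the result is now a types.ImageCloser, so the caller owns the Close call. The package and function names are assumptions:

package example

import (
	"github.com/containers/image/types"
)

// manifestOf resolves a reference into an image and returns its manifest,
// closing the types.ImageCloser that NewImage now returns.
func manifestOf(ref types.ImageReference, ctx *types.SystemContext) ([]byte, string, error) {
	img, err := ref.NewImage(ctx)
	if err != nil {
		return nil, "", err
	}
	defer img.Close()
	return img.Manifest()
}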
diff --git a/vendor/github.com/containers/image/docker/archive/src.go b/vendor/github.com/containers/image/docker/archive/src.go
index b9941dfc9..aebcaa82a 100644
--- a/vendor/github.com/containers/image/docker/archive/src.go
+++ b/vendor/github.com/containers/image/docker/archive/src.go
@@ -34,8 +34,3 @@ func (s *archiveImageSource) Reference() types.ImageReference {
func (s *archiveImageSource) Close() error {
return nil
}
-
-// UpdatedLayerInfos() returns updated layer info that should be used when reading, in preference to values in the manifest, if specified.
-func (s *archiveImageSource) UpdatedLayerInfos() []types.BlobInfo {
- return nil
-}
diff --git a/vendor/github.com/containers/image/docker/archive/transport.go b/vendor/github.com/containers/image/docker/archive/transport.go
index f38d4aced..047df73db 100644
--- a/vendor/github.com/containers/image/docker/archive/transport.go
+++ b/vendor/github.com/containers/image/docker/archive/transport.go
@@ -125,13 +125,14 @@ func (ref archiveReference) PolicyConfigurationNamespaces() []string {
return []string{}
}
-// NewImage returns a types.Image for this reference, possibly specialized for this ImageTransport.
-// The caller must call .Close() on the returned Image.
+// NewImage returns a types.ImageCloser for this reference, possibly specialized for this ImageTransport.
+// The caller must call .Close() on the returned ImageCloser.
// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource,
// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage.
-func (ref archiveReference) NewImage(ctx *types.SystemContext) (types.Image, error) {
+// WARNING: This may not do the right thing for a manifest list, see image.FromSource for details.
+func (ref archiveReference) NewImage(ctx *types.SystemContext) (types.ImageCloser, error) {
src := newImageSource(ctx, ref)
- return ctrImage.FromSource(src)
+ return ctrImage.FromSource(ctx, src)
}
// NewImageSource returns a types.ImageSource for this reference.
diff --git a/vendor/github.com/containers/image/docker/daemon/client.go b/vendor/github.com/containers/image/docker/daemon/client.go
new file mode 100644
index 000000000..82fab4b19
--- /dev/null
+++ b/vendor/github.com/containers/image/docker/daemon/client.go
@@ -0,0 +1,69 @@
+package daemon
+
+import (
+ "net/http"
+ "path/filepath"
+
+ "github.com/containers/image/types"
+ dockerclient "github.com/docker/docker/client"
+ "github.com/docker/go-connections/tlsconfig"
+)
+
+const (
+ // The default API version to be used in case none is explicitly specified
+ defaultAPIVersion = "1.22"
+)
+
+// newDockerClient initializes a new API client based on the passed SystemContext.
+func newDockerClient(ctx *types.SystemContext) (*dockerclient.Client, error) {
+ host := dockerclient.DefaultDockerHost
+ if ctx != nil && ctx.DockerDaemonHost != "" {
+ host = ctx.DockerDaemonHost
+ }
+
+ // Sadly, unix:// sockets don't work transparently with dockerclient.NewClient.
+ // They work fine with a nil httpClient; with a non-nil httpClient, the transport’s
+ // TLSClientConfig must be nil (or the client will try using HTTPS over the PF_UNIX socket
+ // regardless of the values in the *tls.Config), and we would have to call sockets.ConfigureTransport.
+ //
+ // We don't really want to configure anything for unix:// sockets, so just pass a nil *http.Client.
+ proto, _, _, err := dockerclient.ParseHost(host)
+ if err != nil {
+ return nil, err
+ }
+ var httpClient *http.Client
+ if proto != "unix" {
+ hc, err := tlsConfig(ctx)
+ if err != nil {
+ return nil, err
+ }
+ httpClient = hc
+ }
+
+ return dockerclient.NewClient(host, defaultAPIVersion, httpClient, nil)
+}
+
+func tlsConfig(ctx *types.SystemContext) (*http.Client, error) {
+ options := tlsconfig.Options{}
+ if ctx != nil && ctx.DockerDaemonInsecureSkipTLSVerify {
+ options.InsecureSkipVerify = true
+ }
+
+ if ctx != nil && ctx.DockerDaemonCertPath != "" {
+ options.CAFile = filepath.Join(ctx.DockerDaemonCertPath, "ca.pem")
+ options.CertFile = filepath.Join(ctx.DockerDaemonCertPath, "cert.pem")
+ options.KeyFile = filepath.Join(ctx.DockerDaemonCertPath, "key.pem")
+ }
+
+ tlsc, err := tlsconfig.Client(options)
+ if err != nil {
+ return nil, err
+ }
+
+ return &http.Client{
+ Transport: &http.Transport{
+ TLSClientConfig: tlsc,
+ },
+ CheckRedirect: dockerclient.CheckRedirect,
+ }, nil
+}
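For orientation, a sketch of the SystemContext fields the new daemon client honours; the host, certificate directory and image name are assumptions:

package main

import (
	"fmt"

	"github.com/containers/image/docker/daemon"
	"github.com/containers/image/types"
)

func main() {
	// Fields consumed by newDockerClient above; ca.pem, cert.pem and key.pem
	// are looked up under DockerDaemonCertPath.
	ctx := &types.SystemContext{
		DockerDaemonHost:                  "tcp://10.0.0.5:2376", // hypothetical remote engine
		DockerDaemonCertPath:              "/etc/docker/certs",   // hypothetical cert directory
		DockerDaemonInsecureSkipTLSVerify: false,
	}

	ref, err := daemon.ParseReference("busybox:latest")
	if err != nil {
		panic(err)
	}
	src, err := ref.NewImageSource(ctx)
	if err != nil {
		panic(err)
	}
	defer src.Close()
	fmt.Println("opened image source via", ctx.DockerDaemonHost)
}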
diff --git a/vendor/github.com/containers/image/docker/daemon/daemon_dest.go b/vendor/github.com/containers/image/docker/daemon/daemon_dest.go
index 559e5c71d..f73ac2339 100644
--- a/vendor/github.com/containers/image/docker/daemon/daemon_dest.go
+++ b/vendor/github.com/containers/image/docker/daemon/daemon_dest.go
@@ -14,6 +14,7 @@ import (
type daemonImageDestination struct {
ref daemonReference
+ mustMatchRuntimeOS bool
*tarfile.Destination // Implements most of types.ImageDestination
// For talking to imageLoadGoroutine
goroutineCancel context.CancelFunc
@@ -24,7 +25,7 @@ type daemonImageDestination struct {
}
// newImageDestination returns a types.ImageDestination for the specified image reference.
-func newImageDestination(systemCtx *types.SystemContext, ref daemonReference) (types.ImageDestination, error) {
+func newImageDestination(ctx *types.SystemContext, ref daemonReference) (types.ImageDestination, error) {
if ref.ref == nil {
return nil, errors.Errorf("Invalid destination docker-daemon:%s: a destination must be a name:tag", ref.StringWithinTransport())
}
@@ -33,7 +34,12 @@ func newImageDestination(systemCtx *types.SystemContext, ref daemonReference) (t
return nil, errors.Errorf("Invalid destination docker-daemon:%s: a destination must be a name:tag", ref.StringWithinTransport())
}
- c, err := client.NewClient(client.DefaultDockerHost, "1.22", nil, nil) // FIXME: overridable host
+ var mustMatchRuntimeOS = true
+ if ctx != nil && ctx.DockerDaemonHost != client.DefaultDockerHost {
+ mustMatchRuntimeOS = false
+ }
+
+ c, err := newDockerClient(ctx)
if err != nil {
return nil, errors.Wrap(err, "Error initializing docker engine client")
}
@@ -42,16 +48,17 @@ func newImageDestination(systemCtx *types.SystemContext, ref daemonReference) (t
// Commit() may never be called, so we may never read from this channel; so, make this buffered to allow imageLoadGoroutine to write status and terminate even if we never read it.
statusChannel := make(chan error, 1)
- ctx, goroutineCancel := context.WithCancel(context.Background())
- go imageLoadGoroutine(ctx, c, reader, statusChannel)
+ goroutineContext, goroutineCancel := context.WithCancel(context.Background())
+ go imageLoadGoroutine(goroutineContext, c, reader, statusChannel)
return &daemonImageDestination{
- ref: ref,
- Destination: tarfile.NewDestination(writer, namedTaggedRef),
- goroutineCancel: goroutineCancel,
- statusChannel: statusChannel,
- writer: writer,
- committed: false,
+ ref: ref,
+ mustMatchRuntimeOS: mustMatchRuntimeOS,
+ Destination: tarfile.NewDestination(writer, namedTaggedRef),
+ goroutineCancel: goroutineCancel,
+ statusChannel: statusChannel,
+ writer: writer,
+ committed: false,
}, nil
}
@@ -80,7 +87,7 @@ func imageLoadGoroutine(ctx context.Context, c *client.Client, reader *io.PipeRe
// MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime OS. False otherwise.
func (d *daemonImageDestination) MustMatchRuntimeOS() bool {
- return true
+ return d.mustMatchRuntimeOS
}
// Close removes resources associated with an initialized ImageDestination, if any.
diff --git a/vendor/github.com/containers/image/docker/daemon/daemon_src.go b/vendor/github.com/containers/image/docker/daemon/daemon_src.go
index 3d059da93..3bd4ad26d 100644
--- a/vendor/github.com/containers/image/docker/daemon/daemon_src.go
+++ b/vendor/github.com/containers/image/docker/daemon/daemon_src.go
@@ -6,14 +6,12 @@ import (
"os"
"github.com/containers/image/docker/tarfile"
+ "github.com/containers/image/internal/tmpdir"
"github.com/containers/image/types"
- "github.com/docker/docker/client"
"github.com/pkg/errors"
"golang.org/x/net/context"
)
-const temporaryDirectoryForBigFiles = "/var/tmp" // Do not use the system default of os.TempDir(), usually /tmp, because with systemd it could be a tmpfs.
-
type daemonImageSource struct {
ref daemonReference
*tarfile.Source // Implements most of types.ImageSource
@@ -35,7 +33,7 @@ type layerInfo struct {
// is the config, and that the following len(RootFS) files are the layers, but that feels
// way too brittle.)
func newImageSource(ctx *types.SystemContext, ref daemonReference) (types.ImageSource, error) {
- c, err := client.NewClient(client.DefaultDockerHost, "1.22", nil, nil) // FIXME: overridable host
+ c, err := newDockerClient(ctx)
if err != nil {
return nil, errors.Wrap(err, "Error initializing docker engine client")
}
@@ -48,7 +46,7 @@ func newImageSource(ctx *types.SystemContext, ref daemonReference) (types.ImageS
defer inputStream.Close()
// FIXME: use SystemContext here.
- tarCopyFile, err := ioutil.TempFile(temporaryDirectoryForBigFiles, "docker-daemon-tar")
+ tarCopyFile, err := ioutil.TempFile(tmpdir.TemporaryDirectoryForBigFiles(), "docker-daemon-tar")
if err != nil {
return nil, err
}
@@ -83,8 +81,3 @@ func (s *daemonImageSource) Reference() types.ImageReference {
func (s *daemonImageSource) Close() error {
return os.Remove(s.tarCopyPath)
}
-
-// UpdatedLayerInfos() returns updated layer info that should be used when reading, in preference to values in the manifest, if specified.
-func (s *daemonImageSource) UpdatedLayerInfos() []types.BlobInfo {
- return nil
-}
diff --git a/vendor/github.com/containers/image/docker/daemon/daemon_transport.go b/vendor/github.com/containers/image/docker/daemon/daemon_transport.go
index 41be1b2db..8ad6b521f 100644
--- a/vendor/github.com/containers/image/docker/daemon/daemon_transport.go
+++ b/vendor/github.com/containers/image/docker/daemon/daemon_transport.go
@@ -151,14 +151,17 @@ func (ref daemonReference) PolicyConfigurationNamespaces() []string {
return []string{}
}
-// NewImage returns a types.Image for this reference.
-// The caller must call .Close() on the returned Image.
-func (ref daemonReference) NewImage(ctx *types.SystemContext) (types.Image, error) {
+// NewImage returns a types.ImageCloser for this reference, possibly specialized for this ImageTransport.
+// The caller must call .Close() on the returned ImageCloser.
+// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource,
+// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage.
+// WARNING: This may not do the right thing for a manifest list, see image.FromSource for details.
+func (ref daemonReference) NewImage(ctx *types.SystemContext) (types.ImageCloser, error) {
src, err := newImageSource(ctx, ref)
if err != nil {
return nil, err
}
- return image.FromSource(src)
+ return image.FromSource(ctx, src)
}
// NewImageSource returns a types.ImageSource for this reference.
diff --git a/vendor/github.com/containers/image/docker/docker_image.go b/vendor/github.com/containers/image/docker/docker_image.go
index 8be35b735..2148ed8ba 100644
--- a/vendor/github.com/containers/image/docker/docker_image.go
+++ b/vendor/github.com/containers/image/docker/docker_image.go
@@ -12,26 +12,26 @@ import (
"github.com/pkg/errors"
)
-// Image is a Docker-specific implementation of types.Image with a few extra methods
+// Image is a Docker-specific implementation of types.ImageCloser with a few extra methods
// which are specific to Docker.
type Image struct {
- types.Image
+ types.ImageCloser
src *dockerImageSource
}
// newImage returns a new Image interface type after setting up
// a client to the registry hosting the given image.
// The caller must call .Close() on the returned Image.
-func newImage(ctx *types.SystemContext, ref dockerReference) (types.Image, error) {
+func newImage(ctx *types.SystemContext, ref dockerReference) (types.ImageCloser, error) {
s, err := newImageSource(ctx, ref)
if err != nil {
return nil, err
}
- img, err := image.FromSource(s)
+ img, err := image.FromSource(ctx, s)
if err != nil {
return nil, err
}
- return &Image{Image: img, src: s}, nil
+ return &Image{ImageCloser: img, src: s}, nil
}
// SourceRefFullName returns a fully expanded name for the repository this image is in.
diff --git a/vendor/github.com/containers/image/docker/docker_image_dest.go b/vendor/github.com/containers/image/docker/docker_image_dest.go
index 32d5a18b1..79c386225 100644
--- a/vendor/github.com/containers/image/docker/docker_image_dest.go
+++ b/vendor/github.com/containers/image/docker/docker_image_dest.go
@@ -236,7 +236,7 @@ func (d *dockerImageDestination) PutManifest(m []byte) error {
return err
}
defer res.Body.Close()
- if res.StatusCode != http.StatusCreated {
+ if !successStatus(res.StatusCode) {
err = errors.Wrapf(client.HandleErrorResponse(res), "Error uploading manifest to %s", path)
if isManifestInvalidError(errors.Cause(err)) {
err = types.ManifestTypeRejectedError{Err: err}
@@ -246,6 +246,12 @@ func (d *dockerImageDestination) PutManifest(m []byte) error {
return nil
}
+// successStatus returns true if the argument is a successful HTTP response
+// code (in the range 200 - 399 inclusive).
+func successStatus(status int) bool {
+ return status >= 200 && status <= 399
+}
+
// isManifestInvalidError returns true iff err from client.HandleErrorResponse is a “manifest invalid” error.
func isManifestInvalidError(err error) bool {
errors, ok := err.(errcode.Errors)
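successStatus widens what counts as a successful manifest upload from the single 201 Created to any 2xx/3xx response, presumably because some registries and proxies answer with other success codes. A standalone sketch of the check, reusing the helper verbatim with hypothetical status codes:

package main

import "fmt"

func successStatus(status int) bool {
	return status >= 200 && status <= 399
}

func main() {
	for _, code := range []int{200, 201, 202, 301, 400, 500} {
		fmt.Printf("%d -> %t\n", code, successStatus(code))
	}
	// 200, 201, 202 and 301 report true; 400 and 500 report false.
}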
diff --git a/vendor/github.com/containers/image/docker/docker_image_src.go b/vendor/github.com/containers/image/docker/docker_image_src.go
index 14e3c2b50..259de0db1 100644
--- a/vendor/github.com/containers/image/docker/docker_image_src.go
+++ b/vendor/github.com/containers/image/docker/docker_image_src.go
@@ -52,11 +52,6 @@ func (s *dockerImageSource) Close() error {
return nil
}
-// UpdatedLayerInfos() returns updated layer info that should be used when reading, in preference to values in the manifest, if specified.
-func (s *dockerImageSource) UpdatedLayerInfos() []types.BlobInfo {
- return nil
-}
-
// simplifyContentType drops parameters from a HTTP media type (see https://tools.ietf.org/html/rfc7231#section-3.1.1.1)
// Alternatively, an empty string is returned unchanged, and invalid values are "simplified" to an empty string.
func simplifyContentType(contentType string) string {
@@ -72,7 +67,12 @@ func simplifyContentType(contentType string) string {
// GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available).
// It may use a remote (= slow) service.
-func (s *dockerImageSource) GetManifest() ([]byte, string, error) {
+// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve (when the primary manifest is a manifest list);
+// this never happens if the primary manifest is not a manifest list (e.g. if the source never returns manifest lists).
+func (s *dockerImageSource) GetManifest(instanceDigest *digest.Digest) ([]byte, string, error) {
+ if instanceDigest != nil {
+ return s.fetchManifest(context.TODO(), instanceDigest.String())
+ }
err := s.ensureManifestIsLoaded(context.TODO())
if err != nil {
return nil, "", err
@@ -99,18 +99,12 @@ func (s *dockerImageSource) fetchManifest(ctx context.Context, tagOrDigest strin
return manblob, simplifyContentType(res.Header.Get("Content-Type")), nil
}
-// GetTargetManifest returns an image's manifest given a digest.
-// This is mainly used to retrieve a single image's manifest out of a manifest list.
-func (s *dockerImageSource) GetTargetManifest(digest digest.Digest) ([]byte, string, error) {
- return s.fetchManifest(context.TODO(), digest.String())
-}
-
// ensureManifestIsLoaded sets s.cachedManifest and s.cachedManifestMIMEType
//
// ImageSource implementations are not required or expected to do any caching,
// but because our signatures are “attached” to the manifest digest,
-// we need to ensure that the digest of the manifest returned by GetManifest
-// and used by GetSignatures are consistent, otherwise we would get spurious
+// we need to ensure that the digest of the manifest returned by GetManifest(nil)
+// and used by GetSignatures(ctx, nil) are consistent, otherwise we would get spurious
// signature verification failures when pulling while a tag is being updated.
func (s *dockerImageSource) ensureManifestIsLoaded(ctx context.Context) error {
if s.cachedManifest != nil {
@@ -181,22 +175,30 @@ func (s *dockerImageSource) GetBlob(info types.BlobInfo) (io.ReadCloser, int64,
return res.Body, getBlobSize(res), nil
}
-func (s *dockerImageSource) GetSignatures(ctx context.Context) ([][]byte, error) {
+// GetSignatures returns the image's signatures. It may use a remote (= slow) service.
+// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve signatures for
+// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list
+// (e.g. if the source never returns manifest lists).
+func (s *dockerImageSource) GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) {
if err := s.c.detectProperties(ctx); err != nil {
return nil, err
}
switch {
case s.c.signatureBase != nil:
- return s.getSignaturesFromLookaside(ctx)
+ return s.getSignaturesFromLookaside(ctx, instanceDigest)
case s.c.supportsSignatures:
- return s.getSignaturesFromAPIExtension(ctx)
+ return s.getSignaturesFromAPIExtension(ctx, instanceDigest)
default:
return [][]byte{}, nil
}
}
-// manifestDigest returns a digest of the manifest, either from the supplied reference or from a fetched manifest.
-func (s *dockerImageSource) manifestDigest(ctx context.Context) (digest.Digest, error) {
+// manifestDigest returns a digest of the manifest, from instanceDigest if non-nil; or from the supplied reference,
+// or finally, from a fetched manifest.
+func (s *dockerImageSource) manifestDigest(ctx context.Context, instanceDigest *digest.Digest) (digest.Digest, error) {
+ if instanceDigest != nil {
+ return *instanceDigest, nil
+ }
if digested, ok := s.ref.ref.(reference.Digested); ok {
d := digested.Digest()
if d.Algorithm() == digest.Canonical {
@@ -211,8 +213,8 @@ func (s *dockerImageSource) manifestDigest(ctx context.Context) (digest.Digest,
// getSignaturesFromLookaside implements GetSignatures() from the lookaside location configured in s.c.signatureBase,
// which is not nil.
-func (s *dockerImageSource) getSignaturesFromLookaside(ctx context.Context) ([][]byte, error) {
- manifestDigest, err := s.manifestDigest(ctx)
+func (s *dockerImageSource) getSignaturesFromLookaside(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) {
+ manifestDigest, err := s.manifestDigest(ctx, instanceDigest)
if err != nil {
return nil, err
}
@@ -281,8 +283,8 @@ func (s *dockerImageSource) getOneSignature(ctx context.Context, url *url.URL) (
}
// getSignaturesFromAPIExtension implements GetSignatures() using the X-Registry-Supports-Signatures API extension.
-func (s *dockerImageSource) getSignaturesFromAPIExtension(ctx context.Context) ([][]byte, error) {
- manifestDigest, err := s.manifestDigest(ctx)
+func (s *dockerImageSource) getSignaturesFromAPIExtension(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) {
+ manifestDigest, err := s.manifestDigest(ctx, instanceDigest)
if err != nil {
return nil, err
}
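The instanceDigest parameter added above is meant to be threaded through unchanged by callers: nil selects the primary manifest, a non-nil digest selects one instance of a manifest list, and signatures are looked up for the same instance. A hypothetical helper sketching that contract against the new ImageSource methods:

package example

import (
	"context"

	"github.com/containers/image/types"
	"github.com/opencontainers/go-digest"
)

// manifestAndSignatures fetches the manifest and its signatures for the same instance.
func manifestAndSignatures(ctx context.Context, src types.ImageSource, instance *digest.Digest) ([]byte, [][]byte, error) {
	manblob, _, err := src.GetManifest(instance)
	if err != nil {
		return nil, nil, err
	}
	sigs, err := src.GetSignatures(ctx, instance)
	if err != nil {
		return nil, nil, err
	}
	return manblob, sigs, nil
}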
diff --git a/vendor/github.com/containers/image/docker/docker_transport.go b/vendor/github.com/containers/image/docker/docker_transport.go
index 1d67cc4fc..cc0aa298a 100644
--- a/vendor/github.com/containers/image/docker/docker_transport.go
+++ b/vendor/github.com/containers/image/docker/docker_transport.go
@@ -122,11 +122,12 @@ func (ref dockerReference) PolicyConfigurationNamespaces() []string {
return policyconfiguration.DockerReferenceNamespaces(ref.ref)
}
-// NewImage returns a types.Image for this reference, possibly specialized for this ImageTransport.
-// The caller must call .Close() on the returned Image.
+// NewImage returns a types.ImageCloser for this reference, possibly specialized for this ImageTransport.
+// The caller must call .Close() on the returned ImageCloser.
// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource,
// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage.
-func (ref dockerReference) NewImage(ctx *types.SystemContext) (types.Image, error) {
+// WARNING: This may not do the right thing for a manifest list, see image.FromSource for details.
+func (ref dockerReference) NewImage(ctx *types.SystemContext) (types.ImageCloser, error) {
return newImage(ctx, ref)
}
diff --git a/vendor/github.com/containers/image/docker/tarfile/dest.go b/vendor/github.com/containers/image/docker/tarfile/dest.go
index aab4a6d91..6e042582e 100644
--- a/vendor/github.com/containers/image/docker/tarfile/dest.go
+++ b/vendor/github.com/containers/image/docker/tarfile/dest.go
@@ -11,6 +11,7 @@ import (
"time"
"github.com/containers/image/docker/reference"
+ "github.com/containers/image/internal/tmpdir"
"github.com/containers/image/manifest"
"github.com/containers/image/types"
"github.com/opencontainers/go-digest"
@@ -18,8 +19,6 @@ import (
"github.com/sirupsen/logrus"
)
-const temporaryDirectoryForBigFiles = "/var/tmp" // Do not use the system default of os.TempDir(), usually /tmp, because with systemd it could be a tmpfs.
-
// Destination is a partial implementation of types.ImageDestination for writing to an io.Writer.
type Destination struct {
writer io.Writer
@@ -107,7 +106,7 @@ func (d *Destination) PutBlob(stream io.Reader, inputInfo types.BlobInfo) (types
if inputInfo.Size == -1 { // Ouch, we need to stream the blob into a temporary file just to determine the size.
logrus.Debugf("docker tarfile: input with unknown size, streaming to disk first ...")
- streamCopy, err := ioutil.TempFile(temporaryDirectoryForBigFiles, "docker-tarfile-blob")
+ streamCopy, err := ioutil.TempFile(tmpdir.TemporaryDirectoryForBigFiles(), "docker-tarfile-blob")
if err != nil {
return types.BlobInfo{}, err
}
@@ -168,7 +167,7 @@ func (d *Destination) ReapplyBlob(info types.BlobInfo) (types.BlobInfo, error) {
func (d *Destination) PutManifest(m []byte) error {
// We do not bother with types.ManifestTypeRejectedError; our .SupportedManifestMIMETypes() above is already providing only one alternative,
// so the caller trying a different manifest kind would be pointless.
- var man manifest.Schema2
+ var man schema2Manifest
if err := json.Unmarshal(m, &man); err != nil {
return errors.Wrap(err, "Error parsing manifest")
}
@@ -177,12 +176,12 @@ func (d *Destination) PutManifest(m []byte) error {
}
layerPaths := []string{}
- for _, l := range man.LayersDescriptors {
+ for _, l := range man.Layers {
layerPaths = append(layerPaths, l.Digest.String())
}
items := []ManifestItem{{
- Config: man.ConfigDescriptor.Digest.String(),
+ Config: man.Config.Digest.String(),
RepoTags: []string{d.repoTag},
Layers: layerPaths,
Parent: "",
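The hard-coded /var/tmp constant moves behind internal/tmpdir, whose implementation is not part of this hunk. A plausible sketch of such a helper, consistent with the constant it replaces (the $TMPDIR override is an assumption):

package tmpdir

import "os"

// builtinTemporaryDirectoryForBigFiles keeps the old default: not os.TempDir(),
// usually /tmp, because with systemd that can be a tmpfs and big blobs would
// then land in RAM.
const builtinTemporaryDirectoryForBigFiles = "/var/tmp"

// TemporaryDirectoryForBigFiles returns a directory for temporary (big) files.
func TemporaryDirectoryForBigFiles() string {
	if d := os.Getenv("TMPDIR"); d != "" {
		return d
	}
	return builtinTemporaryDirectoryForBigFiles
}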
diff --git a/vendor/github.com/containers/image/docker/tarfile/src.go b/vendor/github.com/containers/image/docker/tarfile/src.go
index 34d5ff32a..e2252c640 100644
--- a/vendor/github.com/containers/image/docker/tarfile/src.go
+++ b/vendor/github.com/containers/image/docker/tarfile/src.go
@@ -249,27 +249,33 @@ func (s *Source) prepareLayerData(tarManifest *ManifestItem, parsedConfig *image
// GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available).
// It may use a remote (= slow) service.
-func (s *Source) GetManifest() ([]byte, string, error) {
+// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve (when the primary manifest is a manifest list);
+// this never happens if the primary manifest is not a manifest list (e.g. if the source never returns manifest lists).
+func (s *Source) GetManifest(instanceDigest *digest.Digest) ([]byte, string, error) {
+ if instanceDigest != nil {
+ // How did we even get here? GetManifest(nil) has returned a manifest.DockerV2Schema2MediaType.
+ return nil, "", errors.Errorf(`Manifest lists are not supported by "docker-daemon:"`)
+ }
if s.generatedManifest == nil {
if err := s.ensureCachedDataIsPresent(); err != nil {
return nil, "", err
}
- m := manifest.Schema2{
+ m := schema2Manifest{
SchemaVersion: 2,
MediaType: manifest.DockerV2Schema2MediaType,
- ConfigDescriptor: manifest.Schema2Descriptor{
+ Config: distributionDescriptor{
MediaType: manifest.DockerV2Schema2ConfigMediaType,
Size: int64(len(s.configBytes)),
Digest: s.configDigest,
},
- LayersDescriptors: []manifest.Schema2Descriptor{},
+ Layers: []distributionDescriptor{},
}
for _, diffID := range s.orderedDiffIDList {
li, ok := s.knownLayers[diffID]
if !ok {
return nil, "", errors.Errorf("Internal inconsistency: Information about layer %s missing", diffID)
}
- m.LayersDescriptors = append(m.LayersDescriptors, manifest.Schema2Descriptor{
+ m.Layers = append(m.Layers, distributionDescriptor{
Digest: digest.Digest(diffID), // diffID is a digest of the uncompressed tarball
MediaType: manifest.DockerV2Schema2LayerMediaType,
Size: li.size,
@@ -284,13 +290,6 @@ func (s *Source) GetManifest() ([]byte, string, error) {
return s.generatedManifest, manifest.DockerV2Schema2MediaType, nil
}
-// GetTargetManifest returns an image's manifest given a digest. This is mainly used to retrieve a single image's manifest
-// out of a manifest list.
-func (s *Source) GetTargetManifest(digest digest.Digest) ([]byte, string, error) {
- // How did we even get here? GetManifest() above has returned a manifest.DockerV2Schema2MediaType.
- return nil, "", errors.Errorf(`Manifest lists are not supported by "docker-daemon:"`)
-}
-
type readCloseWrapper struct {
io.Reader
closeFunc func() error
@@ -355,6 +354,13 @@ func (s *Source) GetBlob(info types.BlobInfo) (io.ReadCloser, int64, error) {
}
// GetSignatures returns the image's signatures. It may use a remote (= slow) service.
-func (s *Source) GetSignatures(ctx context.Context) ([][]byte, error) {
+// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve signatures for
+// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list
+// (e.g. if the source never returns manifest lists).
+func (s *Source) GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) {
+ if instanceDigest != nil {
+ // How did we even get here? GetManifest(nil) has returned a manifest.DockerV2Schema2MediaType.
+ return nil, errors.Errorf(`Manifest lists are not supported by "docker-daemon:"`)
+ }
return [][]byte{}, nil
}
diff --git a/vendor/github.com/containers/image/docker/tarfile/types.go b/vendor/github.com/containers/image/docker/tarfile/types.go
index 4780d66c6..f16cc8c62 100644
--- a/vendor/github.com/containers/image/docker/tarfile/types.go
+++ b/vendor/github.com/containers/image/docker/tarfile/types.go
@@ -1,9 +1,6 @@
package tarfile
-import (
- "github.com/containers/image/manifest"
- "github.com/opencontainers/go-digest"
-)
+import "github.com/opencontainers/go-digest"
// Various data structures.
@@ -21,13 +18,30 @@ type ManifestItem struct {
Config string
RepoTags []string
Layers []string
- Parent imageID `json:",omitempty"`
- LayerSources map[diffID]manifest.Schema2Descriptor `json:",omitempty"`
+ Parent imageID `json:",omitempty"`
+ LayerSources map[diffID]distributionDescriptor `json:",omitempty"`
}
type imageID string
type diffID digest.Digest
+// Based on github.com/docker/distribution/blobs.go
+type distributionDescriptor struct {
+ MediaType string `json:"mediaType,omitempty"`
+ Size int64 `json:"size,omitempty"`
+ Digest digest.Digest `json:"digest,omitempty"`
+ URLs []string `json:"urls,omitempty"`
+}
+
+// Based on github.com/docker/distribution/manifest/schema2/manifest.go
+// FIXME: We are repeating this all over the place; make a public copy?
+type schema2Manifest struct {
+ SchemaVersion int `json:"schemaVersion"`
+ MediaType string `json:"mediaType,omitempty"`
+ Config distributionDescriptor `json:"config"`
+ Layers []distributionDescriptor `json:"layers"`
+}
+
// Based on github.com/docker/docker/image/image.go
// MOST CONTENT OMITTED AS UNNECESSARY
type image struct {
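For orientation, a sketch of the JSON that the private schema2Manifest type above serializes to; the structs are copied from this hunk, and the sizes and digests are placeholders:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/opencontainers/go-digest"
)

type distributionDescriptor struct {
	MediaType string        `json:"mediaType,omitempty"`
	Size      int64         `json:"size,omitempty"`
	Digest    digest.Digest `json:"digest,omitempty"`
	URLs      []string      `json:"urls,omitempty"`
}

type schema2Manifest struct {
	SchemaVersion int                      `json:"schemaVersion"`
	MediaType     string                   `json:"mediaType,omitempty"`
	Config        distributionDescriptor   `json:"config"`
	Layers        []distributionDescriptor `json:"layers"`
}

func main() {
	m := schema2Manifest{
		SchemaVersion: 2,
		MediaType:     "application/vnd.docker.distribution.manifest.v2+json",
		Config:        distributionDescriptor{MediaType: "application/vnd.docker.container.image.v1+json", Size: 1469, Digest: "sha256:<config digest>"},
		Layers:        []distributionDescriptor{{MediaType: "application/vnd.docker.image.rootfs.diff.tar.gzip", Size: 2048, Digest: "sha256:<layer digest>"}},
	}
	out, _ := json.MarshalIndent(m, "", "  ")
	fmt.Println(string(out))
}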
diff --git a/vendor/github.com/containers/image/image/docker_list.go b/vendor/github.com/containers/image/image/docker_list.go
index 4b152d261..53af9fbd7 100644
--- a/vendor/github.com/containers/image/image/docker_list.go
+++ b/vendor/github.com/containers/image/image/docker_list.go
@@ -2,6 +2,7 @@ package image
import (
"encoding/json"
+ "fmt"
"runtime"
"github.com/containers/image/manifest"
@@ -21,7 +22,7 @@ type platformSpec struct {
// A manifestDescriptor references a platform-specific manifest.
type manifestDescriptor struct {
- manifest.Schema2Descriptor
+ descriptor
Platform platformSpec `json:"platform"`
}
@@ -31,22 +32,36 @@ type manifestList struct {
Manifests []manifestDescriptor `json:"manifests"`
}
-func manifestSchema2FromManifestList(src types.ImageSource, manblob []byte) (genericManifest, error) {
+// chooseDigestFromManifestList parses blob as a schema2 manifest list,
+// and returns the digest of the image appropriate for the current environment.
+func chooseDigestFromManifestList(ctx *types.SystemContext, blob []byte) (digest.Digest, error) {
+ wantedArch := runtime.GOARCH
+ if ctx != nil && ctx.ArchitectureChoice != "" {
+ wantedArch = ctx.ArchitectureChoice
+ }
+ wantedOS := runtime.GOOS
+ if ctx != nil && ctx.OSChoice != "" {
+ wantedOS = ctx.OSChoice
+ }
+
list := manifestList{}
- if err := json.Unmarshal(manblob, &list); err != nil {
- return nil, err
+ if err := json.Unmarshal(blob, &list); err != nil {
+ return "", err
}
- var targetManifestDigest digest.Digest
for _, d := range list.Manifests {
- if d.Platform.Architecture == runtime.GOARCH && d.Platform.OS == runtime.GOOS {
- targetManifestDigest = d.Digest
- break
+ if d.Platform.Architecture == wantedArch && d.Platform.OS == wantedOS {
+ return d.Digest, nil
}
}
- if targetManifestDigest == "" {
- return nil, errors.New("no supported platform found in manifest list")
+ return "", fmt.Errorf("no image found in manifest list for architecture %s, OS %s", wantedArch, wantedOS)
+}
+
+func manifestSchema2FromManifestList(ctx *types.SystemContext, src types.ImageSource, manblob []byte) (genericManifest, error) {
+ targetManifestDigest, err := chooseDigestFromManifestList(ctx, manblob)
+ if err != nil {
+ return nil, err
}
- manblob, mt, err := src.GetTargetManifest(targetManifestDigest)
+ manblob, mt, err := src.GetManifest(&targetManifestDigest)
if err != nil {
return nil, err
}
@@ -59,5 +74,20 @@ func manifestSchema2FromManifestList(src types.ImageSource, manblob []byte) (gen
return nil, errors.Errorf("Manifest image does not match selected manifest digest %s", targetManifestDigest)
}
- return manifestInstanceFromBlob(src, manblob, mt)
+ return manifestInstanceFromBlob(ctx, src, manblob, mt)
+}
+
+// ChooseManifestInstanceFromManifestList returns a digest of a manifest appropriate
+// for the current system from the manifest available from src.
+func ChooseManifestInstanceFromManifestList(ctx *types.SystemContext, src types.UnparsedImage) (digest.Digest, error) {
+ // For now this only handles manifest.DockerV2ListMediaType; we can generalize it later,
+ // probably along with manifest list editing.
+ blob, mt, err := src.Manifest()
+ if err != nil {
+ return "", err
+ }
+ if mt != manifest.DockerV2ListMediaType {
+ return "", fmt.Errorf("Internal error: Trying to select an image from a non-manifest-list manifest type %s", mt)
+ }
+ return chooseDigestFromManifestList(ctx, blob)
}
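chooseDigestFromManifestList boils down to matching (architecture, OS) pairs against the wanted platform. A self-contained sketch of that selection logic over a hypothetical, truncated manifest list (the digests are placeholders):

package main

import (
	"encoding/json"
	"fmt"
)

const list = `{
  "schemaVersion": 2,
  "manifests": [
    {"digest": "sha256:aaa...", "platform": {"architecture": "arm64", "os": "linux"}},
    {"digest": "sha256:bbb...", "platform": {"architecture": "amd64", "os": "linux"}}
  ]
}`

func main() {
	var l struct {
		Manifests []struct {
			Digest   string `json:"digest"`
			Platform struct {
				Architecture string `json:"architecture"`
				OS           string `json:"os"`
			} `json:"platform"`
		} `json:"manifests"`
	}
	if err := json.Unmarshal([]byte(list), &l); err != nil {
		panic(err)
	}
	wantedArch, wantedOS := "amd64", "linux" // stands in for ctx.ArchitectureChoice / runtime.GOARCH and ctx.OSChoice / runtime.GOOS
	for _, m := range l.Manifests {
		if m.Platform.Architecture == wantedArch && m.Platform.OS == wantedOS {
			fmt.Println("selected", m.Digest)
			return
		}
	}
	fmt.Printf("no image found in manifest list for architecture %s, OS %s\n", wantedArch, wantedOS)
}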
diff --git a/vendor/github.com/containers/image/image/docker_schema1.go b/vendor/github.com/containers/image/image/docker_schema1.go
index 86e30b3e0..4c3c78acd 100644
--- a/vendor/github.com/containers/image/image/docker_schema1.go
+++ b/vendor/github.com/containers/image/image/docker_schema1.go
@@ -2,7 +2,9 @@ package image
import (
"encoding/json"
+ "regexp"
"strings"
+ "time"
"github.com/containers/image/docker/reference"
"github.com/containers/image/manifest"
@@ -12,25 +14,87 @@ import (
"github.com/pkg/errors"
)
+var (
+ validHex = regexp.MustCompile(`^([a-f0-9]{64})$`)
+)
+
+type fsLayersSchema1 struct {
+ BlobSum digest.Digest `json:"blobSum"`
+}
+
+type historySchema1 struct {
+ V1Compatibility string `json:"v1Compatibility"`
+}
+
+// The V1Compatibility field of historySchema1 is a JSON string containing this structure. It is similar to v1Image but not the same; in particular, note the ThrowAway field.
+type v1Compatibility struct {
+ ID string `json:"id"`
+ Parent string `json:"parent,omitempty"`
+ Comment string `json:"comment,omitempty"`
+ Created time.Time `json:"created"`
+ ContainerConfig struct {
+ Cmd []string
+ } `json:"container_config,omitempty"`
+ Author string `json:"author,omitempty"`
+ ThrowAway bool `json:"throwaway,omitempty"`
+}
+
type manifestSchema1 struct {
- m *manifest.Schema1
+ Name string `json:"name"`
+ Tag string `json:"tag"`
+ Architecture string `json:"architecture"`
+ FSLayers []fsLayersSchema1 `json:"fsLayers"`
+ History []historySchema1 `json:"history"`
+ SchemaVersion int `json:"schemaVersion"`
}
-func manifestSchema1FromManifest(manifestBlob []byte) (genericManifest, error) {
- m, err := manifest.Schema1FromManifest(manifestBlob)
- if err != nil {
+func manifestSchema1FromManifest(manifest []byte) (genericManifest, error) {
+ mschema1 := &manifestSchema1{}
+ if err := json.Unmarshal(manifest, mschema1); err != nil {
return nil, err
}
- return &manifestSchema1{m: m}, nil
+ if mschema1.SchemaVersion != 1 {
+ return nil, errors.Errorf("unsupported schema version %d", mschema1.SchemaVersion)
+ }
+ if len(mschema1.FSLayers) != len(mschema1.History) {
+ return nil, errors.New("length of history not equal to number of layers")
+ }
+ if len(mschema1.FSLayers) == 0 {
+ return nil, errors.New("no FSLayers in manifest")
+ }
+
+ if err := fixManifestLayers(mschema1); err != nil {
+ return nil, err
+ }
+ return mschema1, nil
}
// manifestSchema1FromComponents builds a new manifestSchema1 from the supplied data.
-func manifestSchema1FromComponents(ref reference.Named, fsLayers []manifest.Schema1FSLayers, history []manifest.Schema1History, architecture string) genericManifest {
- return &manifestSchema1{m: manifest.Schema1FromComponents(ref, fsLayers, history, architecture)}
+func manifestSchema1FromComponents(ref reference.Named, fsLayers []fsLayersSchema1, history []historySchema1, architecture string) genericManifest {
+ var name, tag string
+ if ref != nil { // Well, what to do if it _is_ nil? Most consumers actually don't use these fields nowadays, so we might as well try not supplying them.
+ name = reference.Path(ref)
+ if tagged, ok := ref.(reference.NamedTagged); ok {
+ tag = tagged.Tag()
+ }
+ }
+ return &manifestSchema1{
+ Name: name,
+ Tag: tag,
+ Architecture: architecture,
+ FSLayers: fsLayers,
+ History: history,
+ SchemaVersion: 1,
+ }
}
func (m *manifestSchema1) serialize() ([]byte, error) {
- return m.m.Serialize()
+ // docker/distribution requires a signature even if the incoming data uses the nominally unsigned DockerV2Schema1MediaType.
+ unsigned, err := json.Marshal(*m)
+ if err != nil {
+ return nil, err
+ }
+ return manifest.AddDummyV2S1Signature(unsigned)
}
func (m *manifestSchema1) manifestMIMEType() string {
@@ -40,7 +104,7 @@ func (m *manifestSchema1) manifestMIMEType() string {
// ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object.
// Note that the config object may not exist in the underlying storage in the return value of UpdatedImage! Use ConfigBlob() below.
func (m *manifestSchema1) ConfigInfo() types.BlobInfo {
- return m.m.ConfigInfo()
+ return types.BlobInfo{}
}
// ConfigBlob returns the blob described by ConfigInfo, iff ConfigInfo().Digest != ""; nil otherwise.
@@ -64,7 +128,11 @@ func (m *manifestSchema1) OCIConfig() (*imgspecv1.Image, error) {
// The Digest field is guaranteed to be provided; Size may be -1.
// WARNING: The list may contain duplicates, and they are semantically relevant.
func (m *manifestSchema1) LayerInfos() []types.BlobInfo {
- return m.m.LayerInfos()
+ layers := make([]types.BlobInfo, len(m.FSLayers))
+ for i, layer := range m.FSLayers { // NOTE: This includes empty layers (where m.History.V1Compatibility->ThrowAway)
+ layers[(len(m.FSLayers)-1)-i] = types.BlobInfo{Digest: layer.BlobSum, Size: -1}
+ }
+ return layers
}
// EmbeddedDockerReferenceConflicts whether a Docker reference embedded in the manifest, if any, conflicts with destination ref.
@@ -85,16 +153,16 @@ func (m *manifestSchema1) EmbeddedDockerReferenceConflicts(ref reference.Named)
} else {
tag = ""
}
- return m.m.Name != name || m.m.Tag != tag
+ return m.Name != name || m.Tag != tag
}
func (m *manifestSchema1) imageInspectInfo() (*types.ImageInspectInfo, error) {
v1 := &v1Image{}
- if err := json.Unmarshal([]byte(m.m.History[0].V1Compatibility), v1); err != nil {
+ if err := json.Unmarshal([]byte(m.History[0].V1Compatibility), v1); err != nil {
return nil, err
}
i := &types.ImageInspectInfo{
- Tag: m.m.Tag,
+ Tag: m.Tag,
DockerVersion: v1.DockerVersion,
Created: v1.Created,
Architecture: v1.Architecture,
@@ -116,18 +184,25 @@ func (m *manifestSchema1) UpdatedImageNeedsLayerDiffIDs(options types.ManifestUp
// UpdatedImage returns a types.Image modified according to options.
// This does not change the state of the original Image object.
func (m *manifestSchema1) UpdatedImage(options types.ManifestUpdateOptions) (types.Image, error) {
- copy := manifestSchema1{m: manifest.Schema1Clone(m.m)}
+ copy := *m
if options.LayerInfos != nil {
- if err := copy.m.UpdateLayerInfos(options.LayerInfos); err != nil {
- return nil, err
+ // Our LayerInfos includes empty layers (where m.History.V1Compatibility->ThrowAway), so expect them to be included here as well.
+ if len(copy.FSLayers) != len(options.LayerInfos) {
+ return nil, errors.Errorf("Error preparing updated manifest: layer count changed from %d to %d", len(copy.FSLayers), len(options.LayerInfos))
+ }
+ for i, info := range options.LayerInfos {
+ // (docker push) sets up m.History.V1Compatibility->{Id,Parent} based on values of info.Digest,
+ // but (docker pull) ignores them in favor of computing DiffIDs from uncompressed data, except verifying the child->parent links and uniqueness.
+ // So, we don't bother recomputing the IDs in m.History.V1Compatibility.
+ copy.FSLayers[(len(options.LayerInfos)-1)-i].BlobSum = info.Digest
}
}
if options.EmbeddedDockerReference != nil {
- copy.m.Name = reference.Path(options.EmbeddedDockerReference)
+ copy.Name = reference.Path(options.EmbeddedDockerReference)
if tagged, isTagged := options.EmbeddedDockerReference.(reference.NamedTagged); isTagged {
- copy.m.Tag = tagged.Tag()
+ copy.Tag = tagged.Tag()
} else {
- copy.m.Tag = ""
+ copy.Tag = ""
}
}
@@ -137,7 +212,21 @@ func (m *manifestSchema1) UpdatedImage(options types.ManifestUpdateOptions) (typ
// We have 2 MIME types for schema 1, which are basically equivalent (even the un-"Signed" MIME type will be rejected if there isn’t a signature), so
// handle conversions between them by doing nothing.
case manifest.DockerV2Schema2MediaType:
- return copy.convertToManifestSchema2(options.InformationOnly.LayerInfos, options.InformationOnly.LayerDiffIDs)
+ m2, err := copy.convertToManifestSchema2(options.InformationOnly.LayerInfos, options.InformationOnly.LayerDiffIDs)
+ if err != nil {
+ return nil, err
+ }
+ return memoryImageFromManifest(m2), nil
+ case imgspecv1.MediaTypeImageManifest:
+ // We can't directly convert to OCI, but we can transitively convert via a Docker V2.2 Distribution manifest
+ m2, err := copy.convertToManifestSchema2(options.InformationOnly.LayerInfos, options.InformationOnly.LayerDiffIDs)
+ if err != nil {
+ return nil, err
+ }
+ return m2.UpdatedImage(types.ManifestUpdateOptions{
+ ManifestMIMEType: imgspecv1.MediaTypeImageManifest,
+ InformationOnly: options.InformationOnly,
+ })
default:
return nil, errors.Errorf("Conversion of image manifest from %s to %s is not implemented", manifest.DockerV2Schema1SignedMediaType, options.ManifestMIMEType)
}
@@ -145,20 +234,78 @@ func (m *manifestSchema1) UpdatedImage(options types.ManifestUpdateOptions) (typ
return memoryImageFromManifest(&copy), nil
}
+// fixManifestLayers, after validating the supplied manifest
+// (to use correctly-formatted IDs, and to not have non-consecutive ID collisions in manifest.History),
+// modifies manifest to only have one entry for each layer ID in manifest.History (deleting the older duplicates,
+// both from manifest.History and manifest.FSLayers).
+// Note that even after this succeeds, manifest.FSLayers may contain duplicate entries
+// (for Dockerfile operations which change the configuration but not the filesystem).
+func fixManifestLayers(manifest *manifestSchema1) error {
+ type imageV1 struct {
+ ID string
+ Parent string
+ }
+ // Per the specification, we can assume that len(manifest.FSLayers) == len(manifest.History)
+ imgs := make([]*imageV1, len(manifest.FSLayers))
+ for i := range manifest.FSLayers {
+ img := &imageV1{}
+
+ if err := json.Unmarshal([]byte(manifest.History[i].V1Compatibility), img); err != nil {
+ return err
+ }
+
+ imgs[i] = img
+ if err := validateV1ID(img.ID); err != nil {
+ return err
+ }
+ }
+ if imgs[len(imgs)-1].Parent != "" {
+ return errors.New("Invalid parent ID in the base layer of the image")
+ }
+	// check for duplicate IDs in general, so that we error out instead of deadlocking later
+ idmap := make(map[string]struct{})
+ var lastID string
+ for _, img := range imgs {
+		// skip IDs that appear consecutively; those are handled later
+ if _, exists := idmap[img.ID]; img.ID != lastID && exists {
+ return errors.Errorf("ID %+v appears multiple times in manifest", img.ID)
+ }
+ lastID = img.ID
+ idmap[lastID] = struct{}{}
+ }
+ // backwards loop so that we keep the remaining indexes after removing items
+ for i := len(imgs) - 2; i >= 0; i-- {
+ if imgs[i].ID == imgs[i+1].ID { // repeated ID. remove and continue
+ manifest.FSLayers = append(manifest.FSLayers[:i], manifest.FSLayers[i+1:]...)
+ manifest.History = append(manifest.History[:i], manifest.History[i+1:]...)
+ } else if imgs[i].Parent != imgs[i+1].ID {
+ return errors.Errorf("Invalid parent ID. Expected %v, got %v", imgs[i+1].ID, imgs[i].Parent)
+ }
+ }
+ return nil
+}
+
+func validateV1ID(id string) error {
+ if ok := validHex.MatchString(id); !ok {
+ return errors.Errorf("image ID %q is invalid", id)
+ }
+ return nil
+}
+
// Based on github.com/docker/docker/distribution/pull_v2.go
-func (m *manifestSchema1) convertToManifestSchema2(uploadedLayerInfos []types.BlobInfo, layerDiffIDs []digest.Digest) (types.Image, error) {
- if len(m.m.History) == 0 {
+func (m *manifestSchema1) convertToManifestSchema2(uploadedLayerInfos []types.BlobInfo, layerDiffIDs []digest.Digest) (genericManifest, error) {
+ if len(m.History) == 0 {
// What would this even mean?! Anyhow, the rest of the code depends on fsLayers[0] and history[0] existing.
return nil, errors.Errorf("Cannot convert an image with 0 history entries to %s", manifest.DockerV2Schema2MediaType)
}
- if len(m.m.History) != len(m.m.FSLayers) {
- return nil, errors.Errorf("Inconsistent schema 1 manifest: %d history entries, %d fsLayers entries", len(m.m.History), len(m.m.FSLayers))
+ if len(m.History) != len(m.FSLayers) {
+ return nil, errors.Errorf("Inconsistent schema 1 manifest: %d history entries, %d fsLayers entries", len(m.History), len(m.FSLayers))
}
- if uploadedLayerInfos != nil && len(uploadedLayerInfos) != len(m.m.FSLayers) {
- return nil, errors.Errorf("Internal error: uploaded %d blobs, but schema1 manifest has %d fsLayers", len(uploadedLayerInfos), len(m.m.FSLayers))
+ if uploadedLayerInfos != nil && len(uploadedLayerInfos) != len(m.FSLayers) {
+ return nil, errors.Errorf("Internal error: uploaded %d blobs, but schema1 manifest has %d fsLayers", len(uploadedLayerInfos), len(m.FSLayers))
}
- if layerDiffIDs != nil && len(layerDiffIDs) != len(m.m.FSLayers) {
- return nil, errors.Errorf("Internal error: collected %d DiffID values, but schema1 manifest has %d fsLayers", len(layerDiffIDs), len(m.m.FSLayers))
+ if layerDiffIDs != nil && len(layerDiffIDs) != len(m.FSLayers) {
+ return nil, errors.Errorf("Internal error: collected %d DiffID values, but schema1 manifest has %d fsLayers", len(layerDiffIDs), len(m.FSLayers))
}
rootFS := rootFS{
@@ -166,13 +313,13 @@ func (m *manifestSchema1) convertToManifestSchema2(uploadedLayerInfos []types.Bl
DiffIDs: []digest.Digest{},
BaseLayer: "",
}
- var layers []manifest.Schema2Descriptor
- history := make([]imageHistory, len(m.m.History))
- for v1Index := len(m.m.History) - 1; v1Index >= 0; v1Index-- {
- v2Index := (len(m.m.History) - 1) - v1Index
+ var layers []descriptor
+ history := make([]imageHistory, len(m.History))
+ for v1Index := len(m.History) - 1; v1Index >= 0; v1Index-- {
+ v2Index := (len(m.History) - 1) - v1Index
- var v1compat manifest.Schema1V1Compatibility
- if err := json.Unmarshal([]byte(m.m.History[v1Index].V1Compatibility), &v1compat); err != nil {
+ var v1compat v1Compatibility
+ if err := json.Unmarshal([]byte(m.History[v1Index].V1Compatibility), &v1compat); err != nil {
return nil, errors.Wrapf(err, "Error decoding history entry %d", v1Index)
}
history[v2Index] = imageHistory{
@@ -192,26 +339,25 @@ func (m *manifestSchema1) convertToManifestSchema2(uploadedLayerInfos []types.Bl
if layerDiffIDs != nil {
d = layerDiffIDs[v2Index]
}
- layers = append(layers, manifest.Schema2Descriptor{
+ layers = append(layers, descriptor{
MediaType: "application/vnd.docker.image.rootfs.diff.tar.gzip",
Size: size,
- Digest: m.m.FSLayers[v1Index].BlobSum,
+ Digest: m.FSLayers[v1Index].BlobSum,
})
rootFS.DiffIDs = append(rootFS.DiffIDs, d)
}
}
- configJSON, err := configJSONFromV1Config([]byte(m.m.History[0].V1Compatibility), rootFS, history)
+ configJSON, err := configJSONFromV1Config([]byte(m.History[0].V1Compatibility), rootFS, history)
if err != nil {
return nil, err
}
- configDescriptor := manifest.Schema2Descriptor{
+ configDescriptor := descriptor{
MediaType: "application/vnd.docker.container.image.v1+json",
Size: int64(len(configJSON)),
Digest: digest.FromBytes(configJSON),
}
- m2 := manifestSchema2FromComponents(configDescriptor, nil, configJSON, layers)
- return memoryImageFromManifest(m2), nil
+ return manifestSchema2FromComponents(configDescriptor, nil, configJSON, layers), nil
}
func configJSONFromV1Config(v1ConfigJSON []byte, rootFS rootFS, history []imageHistory) ([]byte, error) {
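One subtlety worth calling out: schema 1 stores fsLayers newest-first, while LayerInfos (and the options.LayerInfos consumed by UpdatedImage) are oldest-first, hence the (len-1)-i index arithmetic above. A tiny sketch of that reversal with made-up layer names:

package main

import "fmt"

func main() {
	fsLayers := []string{"layer-C (newest)", "layer-B", "layer-A (base)"} // manifest order
	infos := make([]string, len(fsLayers))
	for i, l := range fsLayers {
		infos[(len(fsLayers)-1)-i] = l
	}
	fmt.Println(infos) // [layer-A (base) layer-B layer-C (newest)]
}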
diff --git a/vendor/github.com/containers/image/image/docker_schema2.go b/vendor/github.com/containers/image/image/docker_schema2.go
index 7ccd061c0..848e8743c 100644
--- a/vendor/github.com/containers/image/image/docker_schema2.go
+++ b/vendor/github.com/containers/image/image/docker_schema2.go
@@ -29,44 +29,54 @@ var gzippedEmptyLayer = []byte{
// gzippedEmptyLayerDigest is a digest of gzippedEmptyLayer
const gzippedEmptyLayerDigest = digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4")
+type descriptor struct {
+ MediaType string `json:"mediaType"`
+ Size int64 `json:"size"`
+ Digest digest.Digest `json:"digest"`
+ URLs []string `json:"urls,omitempty"`
+}
+
type manifestSchema2 struct {
- src types.ImageSource // May be nil if configBlob is not nil
- configBlob []byte // If set, corresponds to contents of ConfigDescriptor.
- m *manifest.Schema2
+ src types.ImageSource // May be nil if configBlob is not nil
+ configBlob []byte // If set, corresponds to contents of ConfigDescriptor.
+ SchemaVersion int `json:"schemaVersion"`
+ MediaType string `json:"mediaType"`
+ ConfigDescriptor descriptor `json:"config"`
+ LayersDescriptors []descriptor `json:"layers"`
}
-func manifestSchema2FromManifest(src types.ImageSource, manifestBlob []byte) (genericManifest, error) {
- m, err := manifest.Schema2FromManifest(manifestBlob)
- if err != nil {
+func manifestSchema2FromManifest(src types.ImageSource, manifest []byte) (genericManifest, error) {
+ v2s2 := manifestSchema2{src: src}
+ if err := json.Unmarshal(manifest, &v2s2); err != nil {
return nil, err
}
- return &manifestSchema2{
- src: src,
- m: m,
- }, nil
+ return &v2s2, nil
}
// manifestSchema2FromComponents builds a new manifestSchema2 from the supplied data:
-func manifestSchema2FromComponents(config manifest.Schema2Descriptor, src types.ImageSource, configBlob []byte, layers []manifest.Schema2Descriptor) genericManifest {
+func manifestSchema2FromComponents(config descriptor, src types.ImageSource, configBlob []byte, layers []descriptor) genericManifest {
return &manifestSchema2{
- src: src,
- configBlob: configBlob,
- m: manifest.Schema2FromComponents(config, layers),
+ src: src,
+ configBlob: configBlob,
+ SchemaVersion: 2,
+ MediaType: manifest.DockerV2Schema2MediaType,
+ ConfigDescriptor: config,
+ LayersDescriptors: layers,
}
}
func (m *manifestSchema2) serialize() ([]byte, error) {
- return m.m.Serialize()
+ return json.Marshal(*m)
}
func (m *manifestSchema2) manifestMIMEType() string {
- return m.m.MediaType
+ return m.MediaType
}
// ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object.
// Note that the config object may not exist in the underlying storage in the return value of UpdatedImage! Use ConfigBlob() below.
func (m *manifestSchema2) ConfigInfo() types.BlobInfo {
- return m.m.ConfigInfo()
+ return types.BlobInfo{Digest: m.ConfigDescriptor.Digest, Size: m.ConfigDescriptor.Size}
}
// OCIConfig returns the image configuration as per OCI v1 image-spec. Information about
@@ -95,9 +105,9 @@ func (m *manifestSchema2) ConfigBlob() ([]byte, error) {
return nil, errors.Errorf("Internal error: neither src nor configBlob set in manifestSchema2")
}
stream, _, err := m.src.GetBlob(types.BlobInfo{
- Digest: m.m.ConfigDescriptor.Digest,
- Size: m.m.ConfigDescriptor.Size,
- URLs: m.m.ConfigDescriptor.URLs,
+ Digest: m.ConfigDescriptor.Digest,
+ Size: m.ConfigDescriptor.Size,
+ URLs: m.ConfigDescriptor.URLs,
})
if err != nil {
return nil, err
@@ -108,8 +118,8 @@ func (m *manifestSchema2) ConfigBlob() ([]byte, error) {
return nil, err
}
computedDigest := digest.FromBytes(blob)
- if computedDigest != m.m.ConfigDescriptor.Digest {
- return nil, errors.Errorf("Download config.json digest %s does not match expected %s", computedDigest, m.m.ConfigDescriptor.Digest)
+ if computedDigest != m.ConfigDescriptor.Digest {
+ return nil, errors.Errorf("Download config.json digest %s does not match expected %s", computedDigest, m.ConfigDescriptor.Digest)
}
m.configBlob = blob
}
@@ -120,7 +130,15 @@ func (m *manifestSchema2) ConfigBlob() ([]byte, error) {
// The Digest field is guaranteed to be provided; Size may be -1.
// WARNING: The list may contain duplicates, and they are semantically relevant.
func (m *manifestSchema2) LayerInfos() []types.BlobInfo {
- return m.m.LayerInfos()
+ blobs := []types.BlobInfo{}
+ for _, layer := range m.LayersDescriptors {
+ blobs = append(blobs, types.BlobInfo{
+ Digest: layer.Digest,
+ Size: layer.Size,
+ URLs: layer.URLs,
+ })
+ }
+ return blobs
}
// EmbeddedDockerReferenceConflicts whether a Docker reference embedded in the manifest, if any, conflicts with destination ref.
@@ -161,14 +179,17 @@ func (m *manifestSchema2) UpdatedImageNeedsLayerDiffIDs(options types.ManifestUp
// UpdatedImage returns a types.Image modified according to options.
// This does not change the state of the original Image object.
func (m *manifestSchema2) UpdatedImage(options types.ManifestUpdateOptions) (types.Image, error) {
- copy := manifestSchema2{ // NOTE: This is not a deep copy, it still shares slices etc.
- src: m.src,
- configBlob: m.configBlob,
- m: manifest.Schema2Clone(m.m),
- }
+ copy := *m // NOTE: This is not a deep copy, it still shares slices etc.
if options.LayerInfos != nil {
- if err := copy.m.UpdateLayerInfos(options.LayerInfos); err != nil {
- return nil, err
+ if len(copy.LayersDescriptors) != len(options.LayerInfos) {
+ return nil, errors.Errorf("Error preparing updated manifest: layer count changed from %d to %d", len(copy.LayersDescriptors), len(options.LayerInfos))
+ }
+ copy.LayersDescriptors = make([]descriptor, len(options.LayerInfos))
+ for i, info := range options.LayerInfos {
+ copy.LayersDescriptors[i].MediaType = m.LayersDescriptors[i].MediaType
+ copy.LayersDescriptors[i].Digest = info.Digest
+ copy.LayersDescriptors[i].Size = info.Size
+ copy.LayersDescriptors[i].URLs = info.URLs
}
}
// Ignore options.EmbeddedDockerReference: it may be set when converting from schema1 to schema2, but we really don't care.
@@ -186,15 +207,6 @@ func (m *manifestSchema2) UpdatedImage(options types.ManifestUpdateOptions) (typ
return memoryImageFromManifest(&copy), nil
}
-func oci1DescriptorFromSchema2Descriptor(d manifest.Schema2Descriptor) imgspecv1.Descriptor {
- return imgspecv1.Descriptor{
- MediaType: d.MediaType,
- Size: d.Size,
- Digest: d.Digest,
- URLs: d.URLs,
- }
-}
-
func (m *manifestSchema2) convertToManifestOCI1() (types.Image, error) {
configOCI, err := m.OCIConfig()
if err != nil {
@@ -205,16 +217,18 @@ func (m *manifestSchema2) convertToManifestOCI1() (types.Image, error) {
return nil, err
}
- config := imgspecv1.Descriptor{
- MediaType: imgspecv1.MediaTypeImageConfig,
- Size: int64(len(configOCIBytes)),
- Digest: digest.FromBytes(configOCIBytes),
+ config := descriptorOCI1{
+ descriptor: descriptor{
+ MediaType: imgspecv1.MediaTypeImageConfig,
+ Size: int64(len(configOCIBytes)),
+ Digest: digest.FromBytes(configOCIBytes),
+ },
}
- layers := make([]imgspecv1.Descriptor, len(m.m.LayersDescriptors))
+ layers := make([]descriptorOCI1, len(m.LayersDescriptors))
for idx := range layers {
- layers[idx] = oci1DescriptorFromSchema2Descriptor(m.m.LayersDescriptors[idx])
- if m.m.LayersDescriptors[idx].MediaType == manifest.DockerV2Schema2ForeignLayerMediaType {
+ layers[idx] = descriptorOCI1{descriptor: m.LayersDescriptors[idx]}
+ if m.LayersDescriptors[idx].MediaType == manifest.DockerV2Schema2ForeignLayerMediaType {
layers[idx].MediaType = imgspecv1.MediaTypeImageLayerNonDistributable
} else {
// we assume layers are gzip'ed because docker v2s2 only deals with
@@ -239,8 +253,8 @@ func (m *manifestSchema2) convertToManifestSchema1(dest types.ImageDestination)
}
// Build fsLayers and History, discarding all configs. We will patch the top-level config in later.
- fsLayers := make([]manifest.Schema1FSLayers, len(imageConfig.History))
- history := make([]manifest.Schema1History, len(imageConfig.History))
+ fsLayers := make([]fsLayersSchema1, len(imageConfig.History))
+ history := make([]historySchema1, len(imageConfig.History))
nonemptyLayerIndex := 0
var parentV1ID string // Set in the loop
v1ID := ""
@@ -268,10 +282,10 @@ func (m *manifestSchema2) convertToManifestSchema1(dest types.ImageDestination)
}
blobDigest = gzippedEmptyLayerDigest
} else {
- if nonemptyLayerIndex >= len(m.m.LayersDescriptors) {
- return nil, errors.Errorf("Invalid image configuration, needs more than the %d distributed layers", len(m.m.LayersDescriptors))
+ if nonemptyLayerIndex >= len(m.LayersDescriptors) {
+ return nil, errors.Errorf("Invalid image configuration, needs more than the %d distributed layers", len(m.LayersDescriptors))
}
- blobDigest = m.m.LayersDescriptors[nonemptyLayerIndex].Digest
+ blobDigest = m.LayersDescriptors[nonemptyLayerIndex].Digest
nonemptyLayerIndex++
}
@@ -282,7 +296,7 @@ func (m *manifestSchema2) convertToManifestSchema1(dest types.ImageDestination)
}
v1ID = v
- fakeImage := manifest.Schema1V1Compatibility{
+ fakeImage := v1Compatibility{
ID: v1ID,
Parent: parentV1ID,
Comment: historyEntry.Comment,
@@ -296,8 +310,8 @@ func (m *manifestSchema2) convertToManifestSchema1(dest types.ImageDestination)
return nil, errors.Errorf("Internal error: Error creating v1compatibility for %#v", fakeImage)
}
- fsLayers[v1Index] = manifest.Schema1FSLayers{BlobSum: blobDigest}
- history[v1Index] = manifest.Schema1History{V1Compatibility: string(v1CompatibilityBytes)}
+ fsLayers[v1Index] = fsLayersSchema1{BlobSum: blobDigest}
+ history[v1Index] = historySchema1{V1Compatibility: string(v1CompatibilityBytes)}
// Note that parentV1ID of the top layer is preserved when exiting this loop
}
diff --git a/vendor/github.com/containers/image/image/manifest.go b/vendor/github.com/containers/image/image/manifest.go
index 4a79ac277..132fdd58b 100644
--- a/vendor/github.com/containers/image/image/manifest.go
+++ b/vendor/github.com/containers/image/image/manifest.go
@@ -1,7 +1,6 @@
package image
import (
- "fmt"
"time"
"github.com/containers/image/docker/reference"
@@ -88,18 +87,32 @@ type genericManifest interface {
UpdatedImage(options types.ManifestUpdateOptions) (types.Image, error)
}
-func manifestInstanceFromBlob(src types.ImageSource, manblob []byte, mt string) (genericManifest, error) {
- switch manifest.NormalizedMIMEType(mt) {
- case manifest.DockerV2Schema1MediaType, manifest.DockerV2Schema1SignedMediaType:
+// manifestInstanceFromBlob returns a genericManifest implementation for (manblob, mt) in src.
+// If manblob is a manifest list, it implicitly chooses an appropriate image from the list.
+func manifestInstanceFromBlob(ctx *types.SystemContext, src types.ImageSource, manblob []byte, mt string) (genericManifest, error) {
+ switch mt {
+ // "application/json" is a valid v2s1 value per https://github.com/docker/distribution/blob/master/docs/spec/manifest-v2-1.md .
+ // This works for now, when nothing else seems to return "application/json"; if that were not true, the mapping/detection might
+ // need to happen within the ImageSource.
+ case manifest.DockerV2Schema1MediaType, manifest.DockerV2Schema1SignedMediaType, "application/json":
return manifestSchema1FromManifest(manblob)
case imgspecv1.MediaTypeImageManifest:
return manifestOCI1FromManifest(src, manblob)
case manifest.DockerV2Schema2MediaType:
return manifestSchema2FromManifest(src, manblob)
case manifest.DockerV2ListMediaType:
- return manifestSchema2FromManifestList(src, manblob)
- default: // Note that this may not be reachable, manifest.NormalizedMIMEType has a default for unknown values.
- return nil, fmt.Errorf("Unimplemented manifest MIME type %s", mt)
+ return manifestSchema2FromManifestList(ctx, src, manblob)
+ default:
+ // If it's not a recognized manifest media type, or we have failed determining the type, we'll try one last time
+ // to deserialize using v2s1 as per https://github.com/docker/distribution/blob/master/manifests.go#L108
+ // and https://github.com/docker/distribution/blob/master/manifest/schema1/manifest.go#L50
+ //
+ // Crane registries can also return "text/plain", or pretty much anything else depending on a file extension “recognized” in the tag.
+	// This makes no real sense, but it happens because requests for manifests are redirected to
+	// a content distribution network which is configured that way. See https://bugzilla.redhat.com/show_bug.cgi?id=1389442
+ return manifestSchema1FromManifest(manblob)
}
}
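The dispatch above is by exact media type, with schema 1 doubling as the last-resort fallback. A compact, simplified stand-in for that mapping (using the literal media-type strings rather than the manifest package constants):

package main

import "fmt"

func parserFor(mt string) string {
	switch mt {
	case "application/vnd.docker.distribution.manifest.v2+json":
		return "schema2"
	case "application/vnd.oci.image.manifest.v1+json":
		return "oci1"
	case "application/vnd.docker.distribution.manifest.list.v2+json":
		return "schema2 list (resolved to a single instance)"
	default: // includes "application/json", "text/plain", and anything else a CDN-fronted registry returns
		return "schema1 (last-resort fallback)"
	}
}

func main() {
	for _, mt := range []string{"application/vnd.oci.image.manifest.v1+json", "text/plain"} {
		fmt.Println(mt, "->", parserFor(mt))
	}
}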
diff --git a/vendor/github.com/containers/image/image/memory.go b/vendor/github.com/containers/image/image/memory.go
index 1e8bb3a4a..646dbe249 100644
--- a/vendor/github.com/containers/image/image/memory.go
+++ b/vendor/github.com/containers/image/image/memory.go
@@ -33,11 +33,6 @@ func (i *memoryImage) Reference() types.ImageReference {
return nil
}
-// Close removes resources associated with an initialized UnparsedImage, if any.
-func (i *memoryImage) Close() error {
- return nil
-}
-
// Size returns the size of the image as stored, if known, or -1 if not.
func (i *memoryImage) Size() (int64, error) {
return -1, nil
@@ -66,15 +61,3 @@ func (i *memoryImage) Signatures(ctx context.Context) ([][]byte, error) {
func (i *memoryImage) Inspect() (*types.ImageInspectInfo, error) {
return inspectManifest(i.genericManifest)
}
-
-// IsMultiImage returns true if the image's manifest is a list of images, false otherwise.
-func (i *memoryImage) IsMultiImage() bool {
- return false
-}
-
-// UpdatedLayerInfos returns an updated set of layer blob information which may not match the manifest.
-// The Digest field is guaranteed to be provided; Size may be -1.
-// WARNING: The list may contain duplicates, and they are semantically relevant.
-func (i *memoryImage) UpdatedLayerInfos() []types.BlobInfo {
- return i.LayerInfos()
-}
diff --git a/vendor/github.com/containers/image/image/oci.go b/vendor/github.com/containers/image/image/oci.go
index 77ddedae8..8fe851837 100644
--- a/vendor/github.com/containers/image/image/oci.go
+++ b/vendor/github.com/containers/image/image/oci.go
@@ -12,34 +12,41 @@ import (
"github.com/pkg/errors"
)
+type descriptorOCI1 struct {
+ descriptor
+ Annotations map[string]string `json:"annotations,omitempty"`
+}
+
type manifestOCI1 struct {
- src types.ImageSource // May be nil if configBlob is not nil
- configBlob []byte // If set, corresponds to contents of m.Config.
- m *manifest.OCI1
+ src types.ImageSource // May be nil if configBlob is not nil
+ configBlob []byte // If set, corresponds to contents of ConfigDescriptor.
+ SchemaVersion int `json:"schemaVersion"`
+ ConfigDescriptor descriptorOCI1 `json:"config"`
+ LayersDescriptors []descriptorOCI1 `json:"layers"`
+ Annotations map[string]string `json:"annotations,omitempty"`
}
-func manifestOCI1FromManifest(src types.ImageSource, manifestBlob []byte) (genericManifest, error) {
- m, err := manifest.OCI1FromManifest(manifestBlob)
- if err != nil {
+func manifestOCI1FromManifest(src types.ImageSource, manifest []byte) (genericManifest, error) {
+ oci := manifestOCI1{src: src}
+ if err := json.Unmarshal(manifest, &oci); err != nil {
return nil, err
}
- return &manifestOCI1{
- src: src,
- m: m,
- }, nil
+ return &oci, nil
}
// manifestOCI1FromComponents builds a new manifestOCI1 from the supplied data:
-func manifestOCI1FromComponents(config imgspecv1.Descriptor, src types.ImageSource, configBlob []byte, layers []imgspecv1.Descriptor) genericManifest {
+func manifestOCI1FromComponents(config descriptorOCI1, src types.ImageSource, configBlob []byte, layers []descriptorOCI1) genericManifest {
return &manifestOCI1{
- src: src,
- configBlob: configBlob,
- m: manifest.OCI1FromComponents(config, layers),
+ src: src,
+ configBlob: configBlob,
+ SchemaVersion: 2,
+ ConfigDescriptor: config,
+ LayersDescriptors: layers,
}
}
func (m *manifestOCI1) serialize() ([]byte, error) {
- return m.m.Serialize()
+ return json.Marshal(*m)
}
func (m *manifestOCI1) manifestMIMEType() string {
@@ -49,7 +56,7 @@ func (m *manifestOCI1) manifestMIMEType() string {
// ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object.
// Note that the config object may not exist in the underlying storage in the return value of UpdatedImage! Use ConfigBlob() below.
func (m *manifestOCI1) ConfigInfo() types.BlobInfo {
- return m.m.ConfigInfo()
+ return types.BlobInfo{Digest: m.ConfigDescriptor.Digest, Size: m.ConfigDescriptor.Size, Annotations: m.ConfigDescriptor.Annotations}
}
// ConfigBlob returns the blob described by ConfigInfo, iff ConfigInfo().Digest != ""; nil otherwise.
@@ -60,9 +67,9 @@ func (m *manifestOCI1) ConfigBlob() ([]byte, error) {
return nil, errors.Errorf("Internal error: neither src nor configBlob set in manifestOCI1")
}
stream, _, err := m.src.GetBlob(types.BlobInfo{
- Digest: m.m.Config.Digest,
- Size: m.m.Config.Size,
- URLs: m.m.Config.URLs,
+ Digest: m.ConfigDescriptor.Digest,
+ Size: m.ConfigDescriptor.Size,
+ URLs: m.ConfigDescriptor.URLs,
})
if err != nil {
return nil, err
@@ -73,8 +80,8 @@ func (m *manifestOCI1) ConfigBlob() ([]byte, error) {
return nil, err
}
computedDigest := digest.FromBytes(blob)
- if computedDigest != m.m.Config.Digest {
- return nil, errors.Errorf("Download config.json digest %s does not match expected %s", computedDigest, m.m.Config.Digest)
+ if computedDigest != m.ConfigDescriptor.Digest {
+ return nil, errors.Errorf("Download config.json digest %s does not match expected %s", computedDigest, m.ConfigDescriptor.Digest)
}
m.configBlob = blob
}
@@ -100,7 +107,11 @@ func (m *manifestOCI1) OCIConfig() (*imgspecv1.Image, error) {
// The Digest field is guaranteed to be provided; Size may be -1.
// WARNING: The list may contain duplicates, and they are semantically relevant.
func (m *manifestOCI1) LayerInfos() []types.BlobInfo {
- return m.m.LayerInfos()
+ blobs := []types.BlobInfo{}
+ for _, layer := range m.LayersDescriptors {
+ blobs = append(blobs, types.BlobInfo{Digest: layer.Digest, Size: layer.Size, Annotations: layer.Annotations, URLs: layer.URLs, MediaType: layer.MediaType})
+ }
+ return blobs
}
// EmbeddedDockerReferenceConflicts whether a Docker reference embedded in the manifest, if any, conflicts with destination ref.
@@ -141,14 +152,18 @@ func (m *manifestOCI1) UpdatedImageNeedsLayerDiffIDs(options types.ManifestUpdat
// UpdatedImage returns a types.Image modified according to options.
// This does not change the state of the original Image object.
func (m *manifestOCI1) UpdatedImage(options types.ManifestUpdateOptions) (types.Image, error) {
- copy := manifestOCI1{ // NOTE: This is not a deep copy, it still shares slices etc.
- src: m.src,
- configBlob: m.configBlob,
- m: manifest.OCI1Clone(m.m),
- }
+ copy := *m // NOTE: This is not a deep copy, it still shares slices etc.
if options.LayerInfos != nil {
- if err := copy.m.UpdateLayerInfos(options.LayerInfos); err != nil {
- return nil, err
+ if len(copy.LayersDescriptors) != len(options.LayerInfos) {
+ return nil, errors.Errorf("Error preparing updated manifest: layer count changed from %d to %d", len(copy.LayersDescriptors), len(options.LayerInfos))
+ }
+ copy.LayersDescriptors = make([]descriptorOCI1, len(options.LayerInfos))
+ for i, info := range options.LayerInfos {
+ copy.LayersDescriptors[i].MediaType = m.LayersDescriptors[i].MediaType
+ copy.LayersDescriptors[i].Digest = info.Digest
+ copy.LayersDescriptors[i].Size = info.Size
+ copy.LayersDescriptors[i].Annotations = info.Annotations
+ copy.LayersDescriptors[i].URLs = info.URLs
}
}
// Ignore options.EmbeddedDockerReference: it may be set when converting from schema1, but we really don't care.
@@ -164,26 +179,17 @@ func (m *manifestOCI1) UpdatedImage(options types.ManifestUpdateOptions) (types.
return memoryImageFromManifest(&copy), nil
}
-func schema2DescriptorFromOCI1Descriptor(d imgspecv1.Descriptor) manifest.Schema2Descriptor {
- return manifest.Schema2Descriptor{
- MediaType: d.MediaType,
- Size: d.Size,
- Digest: d.Digest,
- URLs: d.URLs,
- }
-}
-
func (m *manifestOCI1) convertToManifestSchema2() (types.Image, error) {
// Create a copy of the descriptor.
- config := schema2DescriptorFromOCI1Descriptor(m.m.Config)
+ config := m.ConfigDescriptor.descriptor
// The only difference between OCI and DockerSchema2 is the mediatypes. The
// media type of the manifest is handled by manifestSchema2FromComponents.
config.MediaType = manifest.DockerV2Schema2ConfigMediaType
- layers := make([]manifest.Schema2Descriptor, len(m.m.Layers))
+ layers := make([]descriptor, len(m.LayersDescriptors))
for idx := range layers {
- layers[idx] = schema2DescriptorFromOCI1Descriptor(m.m.Layers[idx])
+ layers[idx] = m.LayersDescriptors[idx].descriptor
layers[idx].MediaType = manifest.DockerV2Schema2LayerMediaType
}
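descriptorOCI1 embeds descriptor precisely so the OCI-to-schema2 direction can take the embedded struct (silently dropping Annotations) and then rewrite the media type. A toy illustration of that embedding trick, with made-up values:

package main

import "fmt"

type descriptor struct {
	MediaType string
	Size      int64
	Digest    string
}

type descriptorOCI1 struct {
	descriptor
	Annotations map[string]string
}

func main() {
	oci := descriptorOCI1{
		descriptor:  descriptor{MediaType: "application/vnd.oci.image.layer.v1.tar+gzip", Size: 1234, Digest: "sha256:<layer digest>"},
		Annotations: map[string]string{"org.example.note": "dropped by the conversion"},
	}
	d := oci.descriptor // the annotations do not come along
	d.MediaType = "application/vnd.docker.image.rootfs.diff.tar.gzip"
	fmt.Printf("%+v\n", d)
}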
diff --git a/vendor/github.com/containers/image/image/sourced.go b/vendor/github.com/containers/image/image/sourced.go
index 1293f7d30..bdda05a05 100644
--- a/vendor/github.com/containers/image/image/sourced.go
+++ b/vendor/github.com/containers/image/image/sourced.go
@@ -4,12 +4,22 @@
package image
import (
- "github.com/containers/image/manifest"
"github.com/containers/image/types"
)
-// FromSource returns a types.Image implementation for source.
-// The caller must call .Close() on the returned Image.
+// imageCloser implements types.ImageCloser, perhaps allowing simple users
+// to use a single object without having to keep a reference to a types.ImageSource
+// only to call types.ImageSource.Close().
+type imageCloser struct {
+ types.Image
+ src types.ImageSource
+}
+
+// FromSource returns a types.ImageCloser implementation for the default instance of source.
+// If source is a manifest list, .Manifest() still returns the manifest list,
+// but other methods transparently return data from an appropriate image instance.
+//
+// The caller must call .Close() on the returned ImageCloser.
//
// FromSource “takes ownership” of the input ImageSource and will call src.Close()
// when the image is closed. (This does not prevent callers from using both the
@@ -18,8 +28,19 @@ import (
//
// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource,
// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage instead of calling this function.
-func FromSource(src types.ImageSource) (types.Image, error) {
- return FromUnparsedImage(UnparsedFromSource(src))
+func FromSource(ctx *types.SystemContext, src types.ImageSource) (types.ImageCloser, error) {
+ img, err := FromUnparsedImage(ctx, UnparsedInstance(src, nil))
+ if err != nil {
+ return nil, err
+ }
+ return &imageCloser{
+ Image: img,
+ src: src,
+ }, nil
+}
+
+func (ic *imageCloser) Close() error {
+ return ic.src.Close()
}
// sourcedImage is a general set of utilities for working with container images,
@@ -38,27 +59,22 @@ type sourcedImage struct {
}
// FromUnparsedImage returns a types.Image implementation for unparsed.
-// The caller must call .Close() on the returned Image.
+// If unparsed represents a manifest list, .Manifest() still returns the manifest list,
+// but other methods transparently return data from an appropriate single image.
//
-// FromSource “takes ownership” of the input UnparsedImage and will call uparsed.Close()
-// when the image is closed. (This does not prevent callers from using both the
-// UnparsedImage and ImageSource objects simultaneously, but it means that they only need to
-// keep a reference to the Image.)
-func FromUnparsedImage(unparsed *UnparsedImage) (types.Image, error) {
+// The Image must not be used after the underlying ImageSource is Close()d.
+func FromUnparsedImage(ctx *types.SystemContext, unparsed *UnparsedImage) (types.Image, error) {
// Note that the input parameter above is specifically *image.UnparsedImage, not types.UnparsedImage:
// we want to be able to use unparsed.src. We could make that an explicit interface, but, well,
// this is the only UnparsedImage implementation around, anyway.
- // Also, we do not explicitly implement types.Image.Close; we let the implementation fall through to
- // unparsed.Close.
-
// NOTE: It is essential for signature verification that all parsing done in this object happens on the same manifest which is returned by unparsed.Manifest().
manifestBlob, manifestMIMEType, err := unparsed.Manifest()
if err != nil {
return nil, err
}
- parsedManifest, err := manifestInstanceFromBlob(unparsed.src, manifestBlob, manifestMIMEType)
+ parsedManifest, err := manifestInstanceFromBlob(ctx, unparsed.src, manifestBlob, manifestMIMEType)
if err != nil {
return nil, err
}
@@ -84,11 +100,3 @@ func (i *sourcedImage) Manifest() ([]byte, string, error) {
func (i *sourcedImage) Inspect() (*types.ImageInspectInfo, error) {
return inspectManifest(i.genericManifest)
}
-
-func (i *sourcedImage) IsMultiImage() bool {
- return i.manifestMIMEType == manifest.DockerV2ListMediaType
-}
-
-func (i *sourcedImage) UpdatedLayerInfos() []types.BlobInfo {
- return i.UnparsedImage.UpdatedLayerInfos()
-}
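
With this change FromSource returns a types.ImageCloser that owns the ImageSource, so closing the image also closes the source. A hedged, compile-only fragment showing that ownership contract as I read it from the comments above; obtaining the ImageSource from a transport is elided because that constructor's signature differs across containers/image versions:

package example

import (
	"fmt"

	"github.com/containers/image/image"
	"github.com/containers/image/types"
)

// inspectFromSource sketches the ownership contract of the new FromSource:
// on success it takes over src, so afterwards only img needs closing.
func inspectFromSource(ctx *types.SystemContext, src types.ImageSource) error {
	img, err := image.FromSource(ctx, src)
	if err != nil {
		// FromSource failed, so ownership was not transferred; close src ourselves.
		src.Close()
		return err
	}
	defer img.Close() // also closes src via imageCloser

	_, mimeType, err := img.Manifest()
	if err != nil {
		return err
	}
	fmt.Println("manifest MIME type:", mimeType)
	return nil
}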
diff --git a/vendor/github.com/containers/image/image/unparsed.go b/vendor/github.com/containers/image/image/unparsed.go
index 7bcac06e0..0a8f78b66 100644
--- a/vendor/github.com/containers/image/image/unparsed.go
+++ b/vendor/github.com/containers/image/image/unparsed.go
@@ -11,8 +11,10 @@ import (
)
// UnparsedImage implements types.UnparsedImage .
+// An UnparsedImage is a pair of (ImageSource, instance digest); it can represent either a manifest list or a single image instance.
type UnparsedImage struct {
src types.ImageSource
+ instanceDigest *digest.Digest
cachedManifest []byte // A private cache for Manifest(); nil if not yet known.
// A private cache for Manifest(), may be the empty string if guessing failed.
// Valid iff cachedManifest is not nil.
@@ -20,49 +22,41 @@ type UnparsedImage struct {
cachedSignatures [][]byte // A private cache for Signatures(); nil if not yet known.
}
-// UnparsedFromSource returns a types.UnparsedImage implementation for source.
-// The caller must call .Close() on the returned UnparsedImage.
+// UnparsedInstance returns a types.UnparsedImage implementation for (source, instanceDigest).
+// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve (when the primary manifest is a manifest list).
//
-// UnparsedFromSource “takes ownership” of the input ImageSource and will call src.Close()
-// when the image is closed. (This does not prevent callers from using both the
-// UnparsedImage and ImageSource objects simultaneously, but it means that they only need to
-// keep a reference to the UnparsedImage.)
-func UnparsedFromSource(src types.ImageSource) *UnparsedImage {
- return &UnparsedImage{src: src}
+// The UnparsedImage must not be used after the underlying ImageSource is Close()d.
+func UnparsedInstance(src types.ImageSource, instanceDigest *digest.Digest) *UnparsedImage {
+ return &UnparsedImage{
+ src: src,
+ instanceDigest: instanceDigest,
+ }
}
// Reference returns the reference used to set up this source, _as specified by the user_
// (not as the image itself, or its underlying storage, claims). This can be used e.g. to determine which public keys are trusted for this image.
func (i *UnparsedImage) Reference() types.ImageReference {
+ // Note that this does not depend on instanceDigest; e.g. all instances within a manifest list need to be signed with the manifest list identity.
return i.src.Reference()
}
-// Close removes resources associated with an initialized UnparsedImage, if any.
-func (i *UnparsedImage) Close() error {
- return i.src.Close()
-}
-
// Manifest is like ImageSource.GetManifest, but the result is cached; it is OK to call this however often you need.
func (i *UnparsedImage) Manifest() ([]byte, string, error) {
if i.cachedManifest == nil {
- m, mt, err := i.src.GetManifest()
+ m, mt, err := i.src.GetManifest(i.instanceDigest)
if err != nil {
return nil, "", err
}
// ImageSource.GetManifest does not do digest verification, but we do;
// this immediately protects also any user of types.Image.
- ref := i.Reference().DockerReference()
- if ref != nil {
- if canonical, ok := ref.(reference.Canonical); ok {
- digest := digest.Digest(canonical.Digest())
- matches, err := manifest.MatchesDigest(m, digest)
- if err != nil {
- return nil, "", errors.Wrap(err, "Error computing manifest digest")
- }
- if !matches {
- return nil, "", errors.Errorf("Manifest does not match provided manifest digest %s", digest)
- }
+ if digest, haveDigest := i.expectedManifestDigest(); haveDigest {
+ matches, err := manifest.MatchesDigest(m, digest)
+ if err != nil {
+ return nil, "", errors.Wrap(err, "Error computing manifest digest")
+ }
+ if !matches {
+ return nil, "", errors.Errorf("Manifest does not match provided manifest digest %s", digest)
}
}
@@ -72,10 +66,26 @@ func (i *UnparsedImage) Manifest() ([]byte, string, error) {
return i.cachedManifest, i.cachedManifestMIMEType, nil
}
+// expectedManifestDigest returns the expected value of the manifest digest, and an indicator of whether it is known.
+// The bool return value seems redundant with digest != ""; it is used explicitly
+// to refuse (unexpected) situations when the digest exists but is "".
+func (i *UnparsedImage) expectedManifestDigest() (digest.Digest, bool) {
+ if i.instanceDigest != nil {
+ return *i.instanceDigest, true
+ }
+ ref := i.Reference().DockerReference()
+ if ref != nil {
+ if canonical, ok := ref.(reference.Canonical); ok {
+ return canonical.Digest(), true
+ }
+ }
+ return "", false
+}
+
// Signatures is like ImageSource.GetSignatures, but the result is cached; it is OK to call this however often you need.
func (i *UnparsedImage) Signatures(ctx context.Context) ([][]byte, error) {
if i.cachedSignatures == nil {
- sigs, err := i.src.GetSignatures(ctx)
+ sigs, err := i.src.GetSignatures(ctx, i.instanceDigest)
if err != nil {
return nil, err
}
@@ -83,10 +93,3 @@ func (i *UnparsedImage) Signatures(ctx context.Context) ([][]byte, error) {
}
return i.cachedSignatures, nil
}
-
-// UpdatedLayerInfos returns an updated set of layer blob information which may not match the manifest.
-// The Digest field is guaranteed to be provided; Size may be -1.
-// WARNING: The list may contain duplicates, and they are semantically relevant.
-func (i *UnparsedImage) UpdatedLayerInfos() []types.BlobInfo {
- return i.src.UpdatedLayerInfos()
-}
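
UnparsedInstance pins either a specific manifest-list entry (via instanceDigest) or, failing that, the digest of a canonical reference, and Manifest() refuses blobs that do not match. A small sketch of just that verification step, reusing manifest.MatchesDigest; the sample blob and digest are made up for illustration:

package main

import (
	"fmt"

	"github.com/containers/image/manifest"
	"github.com/opencontainers/go-digest"
)

// verifyManifestBlob mirrors the check done in UnparsedImage.Manifest():
// when an expected digest is known, the retrieved blob must match it.
func verifyManifestBlob(blob []byte, expected digest.Digest) error {
	matches, err := manifest.MatchesDigest(blob, expected)
	if err != nil {
		return fmt.Errorf("computing manifest digest: %v", err)
	}
	if !matches {
		return fmt.Errorf("manifest does not match provided digest %s", expected)
	}
	return nil
}

func main() {
	blob := []byte(`{"schemaVersion": 2}`)
	expected := digest.FromBytes(blob) // in real use this comes from instanceDigest or a canonical reference
	fmt.Println(verifyManifestBlob(blob, expected)) // <nil>
}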
diff --git a/vendor/github.com/containers/image/internal/tmpdir/tmpdir.go b/vendor/github.com/containers/image/internal/tmpdir/tmpdir.go
new file mode 100644
index 000000000..a28020edc
--- /dev/null
+++ b/vendor/github.com/containers/image/internal/tmpdir/tmpdir.go
@@ -0,0 +1,19 @@
+package tmpdir
+
+import (
+ "os"
+ "runtime"
+)
+
+// TemporaryDirectoryForBigFiles returns a directory for temporary (big) files.
+// On non-Windows systems it avoids os.TempDir(), because the default temporary directory usually falls under /tmp,
+// which on systemd-based systems is often a tmpfs filesystem, unsuitable for big files.
+func TemporaryDirectoryForBigFiles() string {
+ var temporaryDirectoryForBigFiles string
+ if runtime.GOOS == "windows" {
+ temporaryDirectoryForBigFiles = os.TempDir()
+ } else {
+ temporaryDirectoryForBigFiles = "/var/tmp"
+ }
+ return temporaryDirectoryForBigFiles
+}
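
The new helper is used further down by oci/archive to stage unpacked layouts. A usage sketch; note that, being an internal package, this import only compiles from inside containers/image itself:

package main

import (
	"fmt"
	"io/ioutil"
	"os"

	"github.com/containers/image/internal/tmpdir"
)

func main() {
	// Large blobs are staged under /var/tmp (or os.TempDir() on Windows)
	// instead of a possibly tmpfs-backed /tmp.
	dir, err := ioutil.TempDir(tmpdir.TemporaryDirectoryForBigFiles(), "oci")
	if err != nil {
		fmt.Println("error:", err)
		return
	}
	defer os.RemoveAll(dir)
	fmt.Println("staging directory:", dir)
}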
diff --git a/vendor/github.com/containers/image/manifest/docker_schema1.go b/vendor/github.com/containers/image/manifest/docker_schema1.go
deleted file mode 100644
index f4ce73207..000000000
--- a/vendor/github.com/containers/image/manifest/docker_schema1.go
+++ /dev/null
@@ -1,212 +0,0 @@
-package manifest
-
-import (
- "encoding/json"
- "regexp"
- "time"
-
- "github.com/containers/image/docker/reference"
- "github.com/containers/image/types"
- "github.com/opencontainers/go-digest"
- "github.com/pkg/errors"
-)
-
-// Schema1FSLayers is an entry of the "fsLayers" array in docker/distribution schema 1.
-type Schema1FSLayers struct {
- BlobSum digest.Digest `json:"blobSum"`
-}
-
-// Schema1History is an entry of the "history" array in docker/distribution schema 1.
-type Schema1History struct {
- V1Compatibility string `json:"v1Compatibility"`
-}
-
-// Schema1 is a manifest in docker/distribution schema 1.
-type Schema1 struct {
- Name string `json:"name"`
- Tag string `json:"tag"`
- Architecture string `json:"architecture"`
- FSLayers []Schema1FSLayers `json:"fsLayers"`
- History []Schema1History `json:"history"`
- SchemaVersion int `json:"schemaVersion"`
-}
-
-// Schema1V1Compatibility is a v1Compatibility in docker/distribution schema 1.
-type Schema1V1Compatibility struct {
- ID string `json:"id"`
- Parent string `json:"parent,omitempty"`
- Comment string `json:"comment,omitempty"`
- Created time.Time `json:"created"`
- ContainerConfig struct {
- Cmd []string
- } `json:"container_config,omitempty"`
- Author string `json:"author,omitempty"`
- ThrowAway bool `json:"throwaway,omitempty"`
-}
-
-// Schema1FromManifest creates a Schema1 manifest instance from a manifest blob.
-// (NOTE: The instance is not necessarily a literal representation of the original blob,
-// layers with duplicate IDs are eliminated.)
-func Schema1FromManifest(manifest []byte) (*Schema1, error) {
- s1 := Schema1{}
- if err := json.Unmarshal(manifest, &s1); err != nil {
- return nil, err
- }
- if s1.SchemaVersion != 1 {
- return nil, errors.Errorf("unsupported schema version %d", s1.SchemaVersion)
- }
- if len(s1.FSLayers) != len(s1.History) {
- return nil, errors.New("length of history not equal to number of layers")
- }
- if len(s1.FSLayers) == 0 {
- return nil, errors.New("no FSLayers in manifest")
- }
- if err := s1.fixManifestLayers(); err != nil {
- return nil, err
- }
- return &s1, nil
-}
-
-// Schema1FromComponents creates a Schema1 manifest instance from the supplied data.
-func Schema1FromComponents(ref reference.Named, fsLayers []Schema1FSLayers, history []Schema1History, architecture string) *Schema1 {
- var name, tag string
- if ref != nil { // Well, what to do if it _is_ nil? Most consumers actually don't use these fields nowadays, so we might as well try not supplying them.
- name = reference.Path(ref)
- if tagged, ok := ref.(reference.NamedTagged); ok {
- tag = tagged.Tag()
- }
- }
- return &Schema1{
- Name: name,
- Tag: tag,
- Architecture: architecture,
- FSLayers: fsLayers,
- History: history,
- SchemaVersion: 1,
- }
-}
-
-// Schema1Clone creates a copy of the supplied Schema1 manifest.
-func Schema1Clone(src *Schema1) *Schema1 {
- copy := *src
- return &copy
-}
-
-// ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object.
-func (m *Schema1) ConfigInfo() types.BlobInfo {
- return types.BlobInfo{}
-}
-
-// LayerInfos returns a list of BlobInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers).
-// The Digest field is guaranteed to be provided; Size may be -1.
-// WARNING: The list may contain duplicates, and they are semantically relevant.
-func (m *Schema1) LayerInfos() []types.BlobInfo {
- layers := make([]types.BlobInfo, len(m.FSLayers))
- for i, layer := range m.FSLayers { // NOTE: This includes empty layers (where m.History.V1Compatibility->ThrowAway)
- layers[(len(m.FSLayers)-1)-i] = types.BlobInfo{Digest: layer.BlobSum, Size: -1}
- }
- return layers
-}
-
-// UpdateLayerInfos replaces the original layers with the specified BlobInfos (size+digest+urls), in order (the root layer first, and then successive layered layers)
-func (m *Schema1) UpdateLayerInfos(layerInfos []types.BlobInfo) error {
- // Our LayerInfos includes empty layers (where m.History.V1Compatibility->ThrowAway), so expect them to be included here as well.
- if len(m.FSLayers) != len(layerInfos) {
- return errors.Errorf("Error preparing updated manifest: layer count changed from %d to %d", len(m.FSLayers), len(layerInfos))
- }
- for i, info := range layerInfos {
- // (docker push) sets up m.History.V1Compatibility->{Id,Parent} based on values of info.Digest,
- // but (docker pull) ignores them in favor of computing DiffIDs from uncompressed data, except verifying the child->parent links and uniqueness.
- // So, we don't bother recomputing the IDs in m.History.V1Compatibility.
- m.FSLayers[(len(layerInfos)-1)-i].BlobSum = info.Digest
- }
- return nil
-}
-
-// Serialize returns the manifest in a blob format.
-// NOTE: Serialize() does not in general reproduce the original blob if this object was loaded from one, even if no modifications were made!
-func (m *Schema1) Serialize() ([]byte, error) {
- // docker/distribution requires a signature even if the incoming data uses the nominally unsigned DockerV2Schema1MediaType.
- unsigned, err := json.Marshal(*m)
- if err != nil {
- return nil, err
- }
- return AddDummyV2S1Signature(unsigned)
-}
-
-// fixManifestLayers, after validating the supplied manifest
-// (to use correctly-formatted IDs, and to not have non-consecutive ID collisions in m.History),
-// modifies manifest to only have one entry for each layer ID in m.History (deleting the older duplicates,
-// both from m.History and m.FSLayers).
-// Note that even after this succeeds, m.FSLayers may contain duplicate entries
-// (for Dockerfile operations which change the configuration but not the filesystem).
-func (m *Schema1) fixManifestLayers() error {
- type imageV1 struct {
- ID string
- Parent string
- }
- // Per the specification, we can assume that len(m.FSLayers) == len(m.History)
- imgs := make([]*imageV1, len(m.FSLayers))
- for i := range m.FSLayers {
- img := &imageV1{}
-
- if err := json.Unmarshal([]byte(m.History[i].V1Compatibility), img); err != nil {
- return err
- }
-
- imgs[i] = img
- if err := validateV1ID(img.ID); err != nil {
- return err
- }
- }
- if imgs[len(imgs)-1].Parent != "" {
- return errors.New("Invalid parent ID in the base layer of the image")
- }
- // check general duplicates to error instead of a deadlock
- idmap := make(map[string]struct{})
- var lastID string
- for _, img := range imgs {
- // skip IDs that appear after each other, we handle those later
- if _, exists := idmap[img.ID]; img.ID != lastID && exists {
- return errors.Errorf("ID %+v appears multiple times in manifest", img.ID)
- }
- lastID = img.ID
- idmap[lastID] = struct{}{}
- }
- // backwards loop so that we keep the remaining indexes after removing items
- for i := len(imgs) - 2; i >= 0; i-- {
- if imgs[i].ID == imgs[i+1].ID { // repeated ID. remove and continue
- m.FSLayers = append(m.FSLayers[:i], m.FSLayers[i+1:]...)
- m.History = append(m.History[:i], m.History[i+1:]...)
- } else if imgs[i].Parent != imgs[i+1].ID {
- return errors.Errorf("Invalid parent ID. Expected %v, got %v", imgs[i+1].ID, imgs[i].Parent)
- }
- }
- return nil
-}
-
-var validHex = regexp.MustCompile(`^([a-f0-9]{64})$`)
-
-func validateV1ID(id string) error {
- if ok := validHex.MatchString(id); !ok {
- return errors.Errorf("image ID %q is invalid", id)
- }
- return nil
-}
-
-// Inspect returns various information for (skopeo inspect) parsed from the manifest and configuration.
-func (m *Schema1) Inspect(_ func(types.BlobInfo) ([]byte, error)) (*types.ImageInspectInfo, error) {
- s1 := &Schema1V1Compatibility{}
- if err := json.Unmarshal([]byte(m.History[0].V1Compatibility), s1); err != nil {
- return nil, err
- }
- return &types.ImageInspectInfo{
- Tag: m.Tag,
- Created: s1.Created,
- DockerVersion: "",
- Labels: make(map[string]string),
- Architecture: "",
- Os: "",
- Layers: []string{},
- }, nil
-}
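
One detail worth keeping in mind from the removed file: schema1 fsLayers are listed newest-first, so LayerInfos() returned them reversed (root layer first), and UpdateLayerInfos wrote them back with the same index flip. A tiny standalone illustration of that reversal, using plain strings instead of BlobInfo:

package main

import "fmt"

// rootFirst reverses a newest-first list of blob sums into root-first order,
// the same index arithmetic the removed LayerInfos/UpdateLayerInfos used.
func rootFirst(blobSums []string) []string {
	out := make([]string, len(blobSums))
	for i, s := range blobSums {
		out[(len(blobSums)-1)-i] = s
	}
	return out
}

func main() {
	fmt.Println(rootFirst([]string{"sha256:top", "sha256:middle", "sha256:base"}))
	// prints: [sha256:base sha256:middle sha256:top]
}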
diff --git a/vendor/github.com/containers/image/manifest/docker_schema2.go b/vendor/github.com/containers/image/manifest/docker_schema2.go
deleted file mode 100644
index a44e561b8..000000000
--- a/vendor/github.com/containers/image/manifest/docker_schema2.go
+++ /dev/null
@@ -1,241 +0,0 @@
-package manifest
-
-import (
- "encoding/json"
- "time"
-
- "github.com/containers/image/pkg/strslice"
- "github.com/containers/image/types"
- "github.com/opencontainers/go-digest"
- "github.com/pkg/errors"
-)
-
-// Schema2Descriptor is a “descriptor” in docker/distribution schema 2.
-type Schema2Descriptor struct {
- MediaType string `json:"mediaType"`
- Size int64 `json:"size"`
- Digest digest.Digest `json:"digest"`
- URLs []string `json:"urls,omitempty"`
-}
-
-// Schema2 is a manifest in docker/distribution schema 2.
-type Schema2 struct {
- SchemaVersion int `json:"schemaVersion"`
- MediaType string `json:"mediaType"`
- ConfigDescriptor Schema2Descriptor `json:"config"`
- LayersDescriptors []Schema2Descriptor `json:"layers"`
-}
-
-// Schema2Port is a Port, a string containing port number and protocol in the
-// format "80/tcp", from docker/go-connections/nat.
-type Schema2Port string
-
-// Schema2PortSet is a PortSet, a collection of structs indexed by Port, from
-// docker/go-connections/nat.
-type Schema2PortSet map[Schema2Port]struct{}
-
-// Schema2HealthConfig is a HealthConfig, which holds configuration settings
-// for the HEALTHCHECK feature, from docker/docker/api/types/container.
-type Schema2HealthConfig struct {
- // Test is the test to perform to check that the container is healthy.
- // An empty slice means to inherit the default.
- // The options are:
- // {} : inherit healthcheck
- // {"NONE"} : disable healthcheck
- // {"CMD", args...} : exec arguments directly
- // {"CMD-SHELL", command} : run command with system's default shell
- Test []string `json:",omitempty"`
-
- // Zero means to inherit. Durations are expressed as integer nanoseconds.
- Interval time.Duration `json:",omitempty"` // Interval is the time to wait between checks.
- Timeout time.Duration `json:",omitempty"` // Timeout is the time to wait before considering the check to have hung.
-
- // Retries is the number of consecutive failures needed to consider a container as unhealthy.
- // Zero means inherit.
- Retries int `json:",omitempty"`
-}
-
-// Schema2Config is a Config in docker/docker/api/types/container.
-type Schema2Config struct {
- Hostname string // Hostname
- Domainname string // Domainname
- User string // User that will run the command(s) inside the container, also supports user:group
- AttachStdin bool // Attach the standard input, makes possible user interaction
- AttachStdout bool // Attach the standard output
- AttachStderr bool // Attach the standard error
- ExposedPorts Schema2PortSet `json:",omitempty"` // List of exposed ports
- Tty bool // Attach standard streams to a tty, including stdin if it is not closed.
- OpenStdin bool // Open stdin
- StdinOnce bool // If true, close stdin after the 1 attached client disconnects.
- Env []string // List of environment variable to set in the container
- Cmd strslice.StrSlice // Command to run when starting the container
- Healthcheck *Schema2HealthConfig `json:",omitempty"` // Healthcheck describes how to check the container is healthy
- ArgsEscaped bool `json:",omitempty"` // True if command is already escaped (Windows specific)
- Image string // Name of the image as it was passed by the operator (e.g. could be symbolic)
- Volumes map[string]struct{} // List of volumes (mounts) used for the container
- WorkingDir string // Current directory (PWD) in which the command will be launched
- Entrypoint strslice.StrSlice // Entrypoint to run when starting the container
- NetworkDisabled bool `json:",omitempty"` // Is network disabled
- MacAddress string `json:",omitempty"` // Mac Address of the container
- OnBuild []string // ONBUILD metadata that were defined on the image Dockerfile
- Labels map[string]string // List of labels set to this container
- StopSignal string `json:",omitempty"` // Signal to stop a container
- StopTimeout *int `json:",omitempty"` // Timeout (in seconds) to stop a container
- Shell strslice.StrSlice `json:",omitempty"` // Shell for shell-form of RUN, CMD, ENTRYPOINT
-}
-
-// Schema2V1Image is a V1Image in docker/docker/image.
-type Schema2V1Image struct {
- // ID is a unique 64 character identifier of the image
- ID string `json:"id,omitempty"`
- // Parent is the ID of the parent image
- Parent string `json:"parent,omitempty"`
- // Comment is the commit message that was set when committing the image
- Comment string `json:"comment,omitempty"`
- // Created is the timestamp at which the image was created
- Created time.Time `json:"created"`
- // Container is the id of the container used to commit
- Container string `json:"container,omitempty"`
- // ContainerConfig is the configuration of the container that is committed into the image
- ContainerConfig Schema2Config `json:"container_config,omitempty"`
- // DockerVersion specifies the version of Docker that was used to build the image
- DockerVersion string `json:"docker_version,omitempty"`
- // Author is the name of the author that was specified when committing the image
- Author string `json:"author,omitempty"`
- // Config is the configuration of the container received from the client
- Config *Schema2Config `json:"config,omitempty"`
- // Architecture is the hardware that the image is built for and runs on
- Architecture string `json:"architecture,omitempty"`
- // OS is the operating system used to build and run the image
- OS string `json:"os,omitempty"`
- // Size is the total size of the image including all layers it is composed of
- Size int64 `json:",omitempty"`
-}
-
-// Schema2RootFS is a description of how to build up an image's root filesystem, from docker/docker/image.
-type Schema2RootFS struct {
- Type string `json:"type"`
- DiffIDs []digest.Digest `json:"diff_ids,omitempty"`
-}
-
-// Schema2History stores build commands that were used to create an image, from docker/docker/image.
-type Schema2History struct {
- // Created is the timestamp at which the image was created
- Created time.Time `json:"created"`
- // Author is the name of the author that was specified when committing the image
- Author string `json:"author,omitempty"`
- // CreatedBy keeps the Dockerfile command used while building the image
- CreatedBy string `json:"created_by,omitempty"`
- // Comment is the commit message that was set when committing the image
- Comment string `json:"comment,omitempty"`
- // EmptyLayer is set to true if this history item did not generate a
- // layer. Otherwise, the history item is associated with the next
- // layer in the RootFS section.
- EmptyLayer bool `json:"empty_layer,omitempty"`
-}
-
-// Schema2Image is an Image in docker/docker/image.
-type Schema2Image struct {
- Schema2V1Image
- Parent digest.Digest `json:"parent,omitempty"`
- RootFS *Schema2RootFS `json:"rootfs,omitempty"`
- History []Schema2History `json:"history,omitempty"`
- OSVersion string `json:"os.version,omitempty"`
- OSFeatures []string `json:"os.features,omitempty"`
-
- // rawJSON caches the immutable JSON associated with this image.
- rawJSON []byte
-
- // computedID is the ID computed from the hash of the image config.
- // Not to be confused with the legacy V1 ID in V1Image.
- computedID digest.Digest
-}
-
-// Schema2FromManifest creates a Schema2 manifest instance from a manifest blob.
-func Schema2FromManifest(manifest []byte) (*Schema2, error) {
- s2 := Schema2{}
- if err := json.Unmarshal(manifest, &s2); err != nil {
- return nil, err
- }
- return &s2, nil
-}
-
-// Schema2FromComponents creates a Schema2 manifest instance from the supplied data.
-func Schema2FromComponents(config Schema2Descriptor, layers []Schema2Descriptor) *Schema2 {
- return &Schema2{
- SchemaVersion: 2,
- MediaType: DockerV2Schema2MediaType,
- ConfigDescriptor: config,
- LayersDescriptors: layers,
- }
-}
-
-// Schema2Clone creates a copy of the supplied Schema2 manifest.
-func Schema2Clone(src *Schema2) *Schema2 {
- copy := *src
- return &copy
-}
-
-// ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object.
-func (m *Schema2) ConfigInfo() types.BlobInfo {
- return types.BlobInfo{Digest: m.ConfigDescriptor.Digest, Size: m.ConfigDescriptor.Size, MediaType: DockerV2Schema2ConfigMediaType}
-}
-
-// LayerInfos returns a list of BlobInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers).
-// The Digest field is guaranteed to be provided; Size may be -1.
-// WARNING: The list may contain duplicates, and they are semantically relevant.
-func (m *Schema2) LayerInfos() []types.BlobInfo {
- blobs := []types.BlobInfo{}
- for _, layer := range m.LayersDescriptors {
- blobs = append(blobs, types.BlobInfo{
- Digest: layer.Digest,
- Size: layer.Size,
- URLs: layer.URLs,
- MediaType: layer.MediaType,
- })
- }
- return blobs
-}
-
-// UpdateLayerInfos replaces the original layers with the specified BlobInfos (size+digest+urls), in order (the root layer first, and then successive layered layers)
-func (m *Schema2) UpdateLayerInfos(layerInfos []types.BlobInfo) error {
- if len(m.LayersDescriptors) != len(layerInfos) {
- return errors.Errorf("Error preparing updated manifest: layer count changed from %d to %d", len(m.LayersDescriptors), len(layerInfos))
- }
- original := m.LayersDescriptors
- m.LayersDescriptors = make([]Schema2Descriptor, len(layerInfos))
- for i, info := range layerInfos {
- m.LayersDescriptors[i].MediaType = original[i].MediaType
- m.LayersDescriptors[i].Digest = info.Digest
- m.LayersDescriptors[i].Size = info.Size
- m.LayersDescriptors[i].URLs = info.URLs
- }
- return nil
-}
-
-// Serialize returns the manifest in a blob format.
-// NOTE: Serialize() does not in general reproduce the original blob if this object was loaded from one, even if no modifications were made!
-func (m *Schema2) Serialize() ([]byte, error) {
- return json.Marshal(*m)
-}
-
-// Inspect returns various information for (skopeo inspect) parsed from the manifest and configuration.
-func (m *Schema2) Inspect(configGetter func(types.BlobInfo) ([]byte, error)) (*types.ImageInspectInfo, error) {
- config, err := configGetter(m.ConfigInfo())
- if err != nil {
- return nil, err
- }
- s2 := &Schema2Image{}
- if err := json.Unmarshal(config, s2); err != nil {
- return nil, err
- }
- return &types.ImageInspectInfo{
- Tag: "",
- Created: s2.Created,
- DockerVersion: s2.DockerVersion,
- Labels: s2.Config.Labels,
- Architecture: s2.Architecture,
- Os: s2.OS,
- Layers: []string{},
- }, nil
-}
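
The removed Schema2.UpdateLayerInfos kept each layer's media type from the manifest while taking digest, size, and URLs from the caller's blob infos. A self-contained sketch of that merge using local stand-in types (descriptor and blobInfo here are illustrative, not the package's types):

package main

import "fmt"

type descriptor struct {
	MediaType string
	Digest    string
	Size      int64
	URLs      []string
}

type blobInfo struct {
	Digest string
	Size   int64
	URLs   []string
}

// updateLayers mirrors the deleted UpdateLayerInfos: media types stay with the
// manifest, while digest/size/URLs come from the updated blob infos.
func updateLayers(layers []descriptor, infos []blobInfo) ([]descriptor, error) {
	if len(layers) != len(infos) {
		return nil, fmt.Errorf("layer count changed from %d to %d", len(layers), len(infos))
	}
	out := make([]descriptor, len(infos))
	for i, info := range infos {
		out[i] = descriptor{
			MediaType: layers[i].MediaType,
			Digest:    info.Digest,
			Size:      info.Size,
			URLs:      info.URLs,
		}
	}
	return out, nil
}

func main() {
	layers := []descriptor{{MediaType: "application/vnd.docker.image.rootfs.diff.tar.gzip", Digest: "sha256:old", Size: 10}}
	updated, err := updateLayers(layers, []blobInfo{{Digest: "sha256:new", Size: 20}})
	fmt.Println(updated, err)
}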
diff --git a/vendor/github.com/containers/image/manifest/manifest.go b/vendor/github.com/containers/image/manifest/manifest.go
index 01f57a325..2e67763f3 100644
--- a/vendor/github.com/containers/image/manifest/manifest.go
+++ b/vendor/github.com/containers/image/manifest/manifest.go
@@ -2,9 +2,7 @@ package manifest
import (
"encoding/json"
- "fmt"
- "github.com/containers/image/types"
"github.com/docker/libtrust"
"github.com/opencontainers/go-digest"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
@@ -37,34 +35,7 @@ var DefaultRequestedManifestMIMETypes = []string{
DockerV2Schema2MediaType,
DockerV2Schema1SignedMediaType,
DockerV2Schema1MediaType,
- // DockerV2ListMediaType, // FIXME: Restore this ASAP
-}
-
-// Manifest is an interface for parsing, modifying image manifests in isolation.
-// Callers can either use this abstract interface without understanding the details of the formats,
-// or instantiate a specific implementation (e.g. manifest.OCI1) and access the public members
-// directly.
-//
-// See types.Image for functionality not limited to manifests, including format conversions and config parsing.
-// This interface is similar to, but not strictly equivalent to, the equivalent methods in types.Image.
-type Manifest interface {
- // ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object.
- ConfigInfo() types.BlobInfo
- // LayerInfos returns a list of BlobInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers).
- // The Digest field is guaranteed to be provided; Size may be -1.
- // WARNING: The list may contain duplicates, and they are semantically relevant.
- LayerInfos() []types.BlobInfo
- // UpdateLayerInfos replaces the original layers with the specified BlobInfos (size+digest+urls), in order (the root layer first, and then successive layered layers)
- UpdateLayerInfos(layerInfos []types.BlobInfo) error
-
- // Inspect returns various information for (skopeo inspect) parsed from the manifest,
- // incorporating information from a configuration blob returned by configGetter, if
- // the underlying image format is expected to include a configuration blob.
- Inspect(configGetter func(types.BlobInfo) ([]byte, error)) (*types.ImageInspectInfo, error)
-
- // Serialize returns the manifest in a blob format.
- // NOTE: Serialize() does not in general reproduce the original blob if this object was loaded from one, even if no modifications were made!
- Serialize() ([]byte, error)
+ DockerV2ListMediaType,
}
// GuessMIMEType guesses MIME type of a manifest and returns it _if it is recognized_, or "" if unknown or unrecognized.
@@ -172,46 +143,7 @@ func AddDummyV2S1Signature(manifest []byte) ([]byte, error) {
return js.PrettySignature("signatures")
}
-// NormalizedMIMEType returns the effective MIME type of a manifest MIME type returned by a server,
-// centralizing various workarounds.
-func NormalizedMIMEType(input string) string {
- switch input {
- // "application/json" is a valid v2s1 value per https://github.com/docker/distribution/blob/master/docs/spec/manifest-v2-1.md .
- // This works for now, when nothing else seems to return "application/json"; if that were not true, the mapping/detection might
- // need to happen within the ImageSource.
- case "application/json":
- return DockerV2Schema1SignedMediaType
- case DockerV2Schema1MediaType, DockerV2Schema1SignedMediaType,
- imgspecv1.MediaTypeImageManifest,
- DockerV2Schema2MediaType,
- DockerV2ListMediaType:
- return input
- default:
- // If it's not a recognized manifest media type, or we have failed determining the type, we'll try one last time
- // to deserialize using v2s1 as per https://github.com/docker/distribution/blob/master/manifests.go#L108
- // and https://github.com/docker/distribution/blob/master/manifest/schema1/manifest.go#L50
- //
- // Crane registries can also return "text/plain", or pretty much anything else depending on a file extension “recognized” in the tag.
- // This makes no real sense, but it happens because requests for manifests are redirected to
- // a content distribution network which is configured that way. See https://bugzilla.redhat.com/show_bug.cgi?id=1389442
- return DockerV2Schema1SignedMediaType
- }
-}
-
-// FromBlob returns a Manifest instance for the specified manifest blob and the corresponding MIME type
-func FromBlob(manblob []byte, mt string) (Manifest, error) {
- switch NormalizedMIMEType(mt) {
- case DockerV2Schema1MediaType, DockerV2Schema1SignedMediaType:
- return Schema1FromManifest(manblob)
- case imgspecv1.MediaTypeImageManifest:
- return OCI1FromManifest(manblob)
- case DockerV2Schema2MediaType:
- return Schema2FromManifest(manblob)
- case DockerV2ListMediaType:
- return nil, fmt.Errorf("Treating manifest lists as individual manifests is not implemented")
- default: // Note that this may not be reachable, NormalizedMIMEType has a default for unknown values.
- return nil, fmt.Errorf("Unimplemented manifest MIME type %s", mt)
- }
+// MIMETypeIsMultiImage returns true if mimeType identifies a list of images (a manifest list)
+func MIMETypeIsMultiImage(mimeType string) bool {
+ return mimeType == DockerV2ListMediaType
}
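
With the Manifest interface and FromBlob gone from this vendored copy, callers branch on the MIME type directly; MIMETypeIsMultiImage replaces the removed IsMultiImage methods. A short usage sketch:

package main

import (
	"fmt"

	"github.com/containers/image/manifest"
)

func main() {
	for _, mt := range []string{
		manifest.DockerV2ListMediaType,
		manifest.DockerV2Schema2MediaType,
	} {
		// Only the manifest-list media type reports true here.
		fmt.Printf("%s -> multi-image: %v\n", mt, manifest.MIMETypeIsMultiImage(mt))
	}
}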
diff --git a/vendor/github.com/containers/image/manifest/oci.go b/vendor/github.com/containers/image/manifest/oci.go
deleted file mode 100644
index 18e27d23c..000000000
--- a/vendor/github.com/containers/image/manifest/oci.go
+++ /dev/null
@@ -1,108 +0,0 @@
-package manifest
-
-import (
- "encoding/json"
- "time"
-
- "github.com/containers/image/types"
- "github.com/opencontainers/image-spec/specs-go"
- imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
- "github.com/pkg/errors"
-)
-
-// OCI1 is a manifest.Manifest implementation for OCI images.
-// The underlying data from imgspecv1.Manifest is also available.
-type OCI1 struct {
- imgspecv1.Manifest
-}
-
-// OCI1FromManifest creates an OCI1 manifest instance from a manifest blob.
-func OCI1FromManifest(manifest []byte) (*OCI1, error) {
- oci1 := OCI1{}
- if err := json.Unmarshal(manifest, &oci1); err != nil {
- return nil, err
- }
- return &oci1, nil
-}
-
-// OCI1FromComponents creates an OCI1 manifest instance from the supplied data.
-func OCI1FromComponents(config imgspecv1.Descriptor, layers []imgspecv1.Descriptor) *OCI1 {
- return &OCI1{
- imgspecv1.Manifest{
- Versioned: specs.Versioned{SchemaVersion: 2},
- Config: config,
- Layers: layers,
- },
- }
-}
-
-// OCI1Clone creates a copy of the supplied OCI1 manifest.
-func OCI1Clone(src *OCI1) *OCI1 {
- return &OCI1{
- Manifest: src.Manifest,
- }
-}
-
-// ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object.
-func (m *OCI1) ConfigInfo() types.BlobInfo {
- return types.BlobInfo{Digest: m.Config.Digest, Size: m.Config.Size, Annotations: m.Config.Annotations, MediaType: imgspecv1.MediaTypeImageConfig}
-}
-
-// LayerInfos returns a list of BlobInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers).
-// The Digest field is guaranteed to be provided; Size may be -1.
-// WARNING: The list may contain duplicates, and they are semantically relevant.
-func (m *OCI1) LayerInfos() []types.BlobInfo {
- blobs := []types.BlobInfo{}
- for _, layer := range m.Layers {
- blobs = append(blobs, types.BlobInfo{Digest: layer.Digest, Size: layer.Size, Annotations: layer.Annotations, URLs: layer.URLs, MediaType: layer.MediaType})
- }
- return blobs
-}
-
-// UpdateLayerInfos replaces the original layers with the specified BlobInfos (size+digest+urls), in order (the root layer first, and then successive layered layers)
-func (m *OCI1) UpdateLayerInfos(layerInfos []types.BlobInfo) error {
- if len(m.Layers) != len(layerInfos) {
- return errors.Errorf("Error preparing updated manifest: layer count changed from %d to %d", len(m.Layers), len(layerInfos))
- }
- original := m.Layers
- m.Layers = make([]imgspecv1.Descriptor, len(layerInfos))
- for i, info := range layerInfos {
- m.Layers[i].MediaType = original[i].MediaType
- m.Layers[i].Digest = info.Digest
- m.Layers[i].Size = info.Size
- m.Layers[i].Annotations = info.Annotations
- m.Layers[i].URLs = info.URLs
- }
- return nil
-}
-
-// Serialize returns the manifest in a blob format.
-// NOTE: Serialize() does not in general reproduce the original blob if this object was loaded from one, even if no modifications were made!
-func (m *OCI1) Serialize() ([]byte, error) {
- return json.Marshal(*m)
-}
-
-// Inspect returns various information for (skopeo inspect) parsed from the manifest and configuration.
-func (m *OCI1) Inspect(configGetter func(types.BlobInfo) ([]byte, error)) (*types.ImageInspectInfo, error) {
- config, err := configGetter(m.ConfigInfo())
- if err != nil {
- return nil, err
- }
- v1 := &imgspecv1.Image{}
- if err := json.Unmarshal(config, v1); err != nil {
- return nil, err
- }
- created := time.Time{}
- if v1.Created != nil {
- created = *v1.Created
- }
- return &types.ImageInspectInfo{
- Tag: "",
- Created: created,
- DockerVersion: "",
- Labels: v1.Config.Labels,
- Architecture: v1.Architecture,
- Os: v1.OS,
- Layers: []string{},
- }, nil
-}
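
Code that previously used the removed OCI1FromComponents can build the same structure straight from the image-spec types. A hedged sketch; the empty config blob and empty layer list are placeholders:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/opencontainers/go-digest"
	"github.com/opencontainers/image-spec/specs-go"
	imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
)

func main() {
	configBlob := []byte("{}") // placeholder config body
	// Same shape the removed OCI1FromComponents produced.
	m := imgspecv1.Manifest{
		Versioned: specs.Versioned{SchemaVersion: 2},
		Config: imgspecv1.Descriptor{
			MediaType: imgspecv1.MediaTypeImageConfig,
			Digest:    digest.FromBytes(configBlob),
			Size:      int64(len(configBlob)),
		},
		Layers: []imgspecv1.Descriptor{},
	}
	out, err := json.Marshal(m)
	fmt.Println(string(out), err)
}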
diff --git a/vendor/github.com/containers/image/oci/archive/oci_src.go b/vendor/github.com/containers/image/oci/archive/oci_src.go
index fd437f5a9..7d034a4f4 100644
--- a/vendor/github.com/containers/image/oci/archive/oci_src.go
+++ b/vendor/github.com/containers/image/oci/archive/oci_src.go
@@ -68,14 +68,12 @@ func (s *ociArchiveImageSource) Close() error {
return s.unpackedSrc.Close()
}
-// GetManifest returns the image's manifest along with its MIME type
-// (which may be empty when it can't be determined but the manifest is available).
-func (s *ociArchiveImageSource) GetManifest() ([]byte, string, error) {
- return s.unpackedSrc.GetManifest()
-}
-
-func (s *ociArchiveImageSource) GetTargetManifest(digest digest.Digest) ([]byte, string, error) {
- return s.unpackedSrc.GetTargetManifest(digest)
+// GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available).
+// It may use a remote (= slow) service.
+// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve (when the primary manifest is a manifest list);
+// this never happens if the primary manifest is not a manifest list (e.g. if the source never returns manifest lists).
+func (s *ociArchiveImageSource) GetManifest(instanceDigest *digest.Digest) ([]byte, string, error) {
+ return s.unpackedSrc.GetManifest(instanceDigest)
}
// GetBlob returns a stream for the specified blob, and the blob's size.
@@ -83,10 +81,10 @@ func (s *ociArchiveImageSource) GetBlob(info types.BlobInfo) (io.ReadCloser, int
return s.unpackedSrc.GetBlob(info)
}
-func (s *ociArchiveImageSource) GetSignatures(c context.Context) ([][]byte, error) {
- return s.unpackedSrc.GetSignatures(c)
-}
-
-func (s *ociArchiveImageSource) UpdatedLayerInfos() []types.BlobInfo {
- return nil
+// GetSignatures returns the image's signatures. It may use a remote (= slow) service.
+// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve signatures for
+// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list
+// (e.g. if the source never returns manifest lists).
+func (s *ociArchiveImageSource) GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) {
+ return s.unpackedSrc.GetSignatures(ctx, instanceDigest)
}
diff --git a/vendor/github.com/containers/image/oci/archive/oci_transport.go b/vendor/github.com/containers/image/oci/archive/oci_transport.go
index 31b191989..c4a4fa716 100644
--- a/vendor/github.com/containers/image/oci/archive/oci_transport.go
+++ b/vendor/github.com/containers/image/oci/archive/oci_transport.go
@@ -4,13 +4,13 @@ import (
"fmt"
"io/ioutil"
"os"
- "path/filepath"
- "regexp"
"strings"
"github.com/containers/image/directory/explicitfilepath"
"github.com/containers/image/docker/reference"
"github.com/containers/image/image"
+ "github.com/containers/image/internal/tmpdir"
+ "github.com/containers/image/oci/internal"
ocilayout "github.com/containers/image/oci/layout"
"github.com/containers/image/transports"
"github.com/containers/image/types"
@@ -48,51 +48,12 @@ func (t ociArchiveTransport) ParseReference(reference string) (types.ImageRefere
// ValidatePolicyConfigurationScope checks that scope is a valid name for a signature.PolicyTransportScopes keys
func (t ociArchiveTransport) ValidatePolicyConfigurationScope(scope string) error {
- var file string
- sep := strings.SplitN(scope, ":", 2)
- file = sep[0]
-
- if len(sep) == 2 {
- image := sep[1]
- if !refRegexp.MatchString(image) {
- return errors.Errorf("Invalid image %s", image)
- }
- }
-
- if !strings.HasPrefix(file, "/") {
- return errors.Errorf("Invalid scope %s: must be an absolute path", scope)
- }
- // Refuse also "/", otherwise "/" and "" would have the same semantics,
- // and "" could be unexpectedly shadowed by the "/" entry.
- // (Note: we do allow "/:someimage", a bit ridiculous but why refuse it?)
- if scope == "/" {
- return errors.New(`Invalid scope "/": Use the generic default scope ""`)
- }
- cleaned := filepath.Clean(file)
- if cleaned != file {
- return errors.Errorf(`Invalid scope %s: Uses non-canonical path format, perhaps try with path %s`, scope, cleaned)
- }
- return nil
+ return internal.ValidateScope(scope)
}
-// annotation spex from https://github.com/opencontainers/image-spec/blob/master/annotations.md#pre-defined-annotation-keys
-const (
- separator = `(?:[-._:@+]|--)`
- alphanum = `(?:[A-Za-z0-9]+)`
- component = `(?:` + alphanum + `(?:` + separator + alphanum + `)*)`
-)
-
-var refRegexp = regexp.MustCompile(`^` + component + `(?:/` + component + `)*$`)
-
// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an OCI ImageReference.
func ParseReference(reference string) (types.ImageReference, error) {
- var file, image string
- sep := strings.SplitN(reference, ":", 2)
- file = sep[0]
-
- if len(sep) == 2 {
- image = sep[1]
- }
+ file, image := internal.SplitPathAndImage(reference)
return NewReference(file, image)
}
@@ -102,14 +63,15 @@ func NewReference(file, image string) (types.ImageReference, error) {
if err != nil {
return nil, err
}
- // This is necessary to prevent directory paths returned by PolicyConfigurationNamespaces
- // from being ambiguous with values of PolicyConfigurationIdentity.
- if strings.Contains(resolved, ":") {
- return nil, errors.Errorf("Invalid OCI reference %s:%s: path %s contains a colon", file, image, resolved)
+
+ if err := internal.ValidateOCIPath(file); err != nil {
+ return nil, err
}
- if len(image) > 0 && !refRegexp.MatchString(image) {
- return nil, errors.Errorf("Invalid image %s", image)
+
+ if err := internal.ValidateImageName(image); err != nil {
+ return nil, err
}
+
return ociArchiveReference{file: file, resolvedFile: resolved, image: image}, nil
}
@@ -154,14 +116,17 @@ func (ref ociArchiveReference) PolicyConfigurationNamespaces() []string {
return res
}
-// NewImage returns a types.Image for this reference, possibly specialized for this ImageTransport.
-// The caller must call .Close() on the returned Image.
-func (ref ociArchiveReference) NewImage(ctx *types.SystemContext) (types.Image, error) {
+// NewImage returns a types.ImageCloser for this reference, possibly specialized for this ImageTransport.
+// The caller must call .Close() on the returned ImageCloser.
+// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource,
+// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage.
+// WARNING: This may not do the right thing for a manifest list, see image.FromSource for details.
+func (ref ociArchiveReference) NewImage(ctx *types.SystemContext) (types.ImageCloser, error) {
src, err := newImageSource(ctx, ref)
if err != nil {
return nil, err
}
- return image.FromSource(src)
+ return image.FromSource(ctx, src)
}
// NewImageSource returns a types.ImageSource for this reference.
@@ -194,7 +159,7 @@ func (t *tempDirOCIRef) deleteTempDir() error {
// createOCIRef creates the oci reference of the image
func createOCIRef(image string) (tempDirOCIRef, error) {
- dir, err := ioutil.TempDir("/var/tmp", "oci")
+ dir, err := ioutil.TempDir(tmpdir.TemporaryDirectoryForBigFiles(), "oci")
if err != nil {
return tempDirOCIRef{}, errors.Wrapf(err, "error creating temp directory")
}
diff --git a/vendor/github.com/containers/image/oci/internal/oci_util.go b/vendor/github.com/containers/image/oci/internal/oci_util.go
new file mode 100644
index 000000000..c2012e50e
--- /dev/null
+++ b/vendor/github.com/containers/image/oci/internal/oci_util.go
@@ -0,0 +1,126 @@
+package internal
+
+import (
+ "github.com/pkg/errors"
+ "path/filepath"
+ "regexp"
+ "runtime"
+ "strings"
+)
+
+// annotation spex from https://github.com/opencontainers/image-spec/blob/master/annotations.md#pre-defined-annotation-keys
+const (
+ separator = `(?:[-._:@+]|--)`
+ alphanum = `(?:[A-Za-z0-9]+)`
+ component = `(?:` + alphanum + `(?:` + separator + alphanum + `)*)`
+)
+
+var refRegexp = regexp.MustCompile(`^` + component + `(?:/` + component + `)*$`)
+var windowsRefRegexp = regexp.MustCompile(`^([a-zA-Z]:\\.+?):(.*)$`)
+
+// ValidateImageName returns nil if the image name is empty or matches the open-containers image name specs.
+// In any other case an error is returned.
+func ValidateImageName(image string) error {
+ if len(image) == 0 {
+ return nil
+ }
+
+ var err error
+ if !refRegexp.MatchString(image) {
+ err = errors.Errorf("Invalid image %s", image)
+ }
+ return err
+}
+
+// SplitPathAndImage tries to split the provided OCI reference into the OCI path and image.
+// Neither path nor image parts are validated at this stage.
+func SplitPathAndImage(reference string) (string, string) {
+ if runtime.GOOS == "windows" {
+ return splitPathAndImageWindows(reference)
+ }
+ return splitPathAndImageNonWindows(reference)
+}
+
+func splitPathAndImageWindows(reference string) (string, string) {
+ groups := windowsRefRegexp.FindStringSubmatch(reference)
+ // nil group means no match
+ if groups == nil {
+ return reference, ""
+ }
+
+ // we expect three elements: the full match, then the capture group for the path, and
+ // then the capture group for the image
+ if len(groups) != 3 {
+ return reference, ""
+ }
+ return groups[1], groups[2]
+}
+
+func splitPathAndImageNonWindows(reference string) (string, string) {
+ sep := strings.SplitN(reference, ":", 2)
+ path := sep[0]
+
+ var image string
+ if len(sep) == 2 {
+ image = sep[1]
+ }
+ return path, image
+}
+
+// ValidateOCIPath takes the OCI path and validates it.
+func ValidateOCIPath(path string) error {
+ if runtime.GOOS == "windows" {
+ // On Windows we must allow for a ':' as part of the path
+ if strings.Count(path, ":") > 1 {
+ return errors.Errorf("Invalid OCI reference: path %s contains more than one colon", path)
+ }
+ } else {
+ if strings.Contains(path, ":") {
+ return errors.Errorf("Invalid OCI reference: path %s contains a colon", path)
+ }
+ }
+ return nil
+}
+
+// ValidateScope validates a policy configuration scope for an OCI transport.
+func ValidateScope(scope string) error {
+ var err error
+ if runtime.GOOS == "windows" {
+ err = validateScopeWindows(scope)
+ } else {
+ err = validateScopeNonWindows(scope)
+ }
+ if err != nil {
+ return err
+ }
+
+ cleaned := filepath.Clean(scope)
+ if cleaned != scope {
+ return errors.Errorf(`Invalid scope %s: Uses non-canonical path format, perhaps try with path %s`, scope, cleaned)
+ }
+
+ return nil
+}
+
+func validateScopeWindows(scope string) error {
+ matched, _ := regexp.Match(`^[a-zA-Z]:\\`, []byte(scope))
+ if !matched {
+ return errors.Errorf("Invalid scope '%s'. Must be an absolute path", scope)
+ }
+
+ return nil
+}
+
+func validateScopeNonWindows(scope string) error {
+ if !strings.HasPrefix(scope, "/") {
+ return errors.Errorf("Invalid scope %s: must be an absolute path", scope)
+ }
+
+ // Refuse also "/", otherwise "/" and "" would have the same semantics,
+ // and "" could be unexpectedly shadowed by the "/" entry.
+ if scope == "/" {
+ return errors.New(`Invalid scope "/": Use the generic default scope ""`)
+ }
+
+ return nil
+}
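
SplitPathAndImage deliberately defers validation to ValidateOCIPath and ValidateImageName. Since the package is internal, here is a standalone sketch of its non-Windows splitting rule; the sample references are made up:

package main

import (
	"fmt"
	"strings"
)

// splitPathAndImage mirrors the non-Windows behavior of the new
// internal.SplitPathAndImage helper: everything before the first ':' is the
// OCI layout path, everything after it is the (optional) image name.
func splitPathAndImage(reference string) (path, image string) {
	parts := strings.SplitN(reference, ":", 2)
	path = parts[0]
	if len(parts) == 2 {
		image = parts[1]
	}
	return path, image
}

func main() {
	for _, ref := range []string{"/srv/layouts/busybox:latest", "/srv/layouts/busybox"} {
		p, img := splitPathAndImage(ref)
		fmt.Printf("%q -> path=%q image=%q\n", ref, p, img)
	}
}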
diff --git a/vendor/github.com/containers/image/oci/layout/oci_dest.go b/vendor/github.com/containers/image/oci/layout/oci_dest.go
index 4c6d349ed..e95f65167 100644
--- a/vendor/github.com/containers/image/oci/layout/oci_dest.go
+++ b/vendor/github.com/containers/image/oci/layout/oci_dest.go
@@ -18,12 +18,13 @@ import (
)
type ociImageDestination struct {
- ref ociReference
- index imgspecv1.Index
+ ref ociReference
+ index imgspecv1.Index
+ sharedBlobDir string
}
// newImageDestination returns an ImageDestination for writing to an existing directory.
-func newImageDestination(ref ociReference) (types.ImageDestination, error) {
+func newImageDestination(ctx *types.SystemContext, ref ociReference) (types.ImageDestination, error) {
if ref.image == "" {
return nil, errors.Errorf("cannot save image with empty image.ref.name")
}
@@ -43,7 +44,21 @@ func newImageDestination(ref ociReference) (types.ImageDestination, error) {
}
}
- return &ociImageDestination{ref: ref, index: *index}, nil
+ d := &ociImageDestination{ref: ref, index: *index}
+ if ctx != nil {
+ d.sharedBlobDir = ctx.OCISharedBlobDirPath
+ }
+
+ if err := ensureDirectoryExists(d.ref.dir); err != nil {
+ return nil, err
+ }
+ // Per the OCI image specification, layouts MUST have a "blobs" subdirectory,
+ // but it MAY be empty (e.g. if we never end up calling PutBlob)
+ // https://github.com/opencontainers/image-spec/blame/7c889fafd04a893f5c5f50b7ab9963d5d64e5242/image-layout.md#L19
+ if err := ensureDirectoryExists(filepath.Join(d.ref.dir, "blobs")); err != nil {
+ return nil, err
+ }
+ return d, nil
}
// Reference returns the reference used to set up this destination. Note that this should directly correspond to user's intent,
@@ -92,16 +107,16 @@ func (d *ociImageDestination) MustMatchRuntimeOS() bool {
// to any other readers for download using the supplied digest.
// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
func (d *ociImageDestination) PutBlob(stream io.Reader, inputInfo types.BlobInfo) (types.BlobInfo, error) {
- if err := ensureDirectoryExists(d.ref.dir); err != nil {
- return types.BlobInfo{}, err
- }
blobFile, err := ioutil.TempFile(d.ref.dir, "oci-put-blob")
if err != nil {
return types.BlobInfo{}, err
}
succeeded := false
+ explicitClosed := false
defer func() {
- blobFile.Close()
+ if !explicitClosed {
+ blobFile.Close()
+ }
if !succeeded {
os.Remove(blobFile.Name())
}
@@ -121,17 +136,28 @@ func (d *ociImageDestination) PutBlob(stream io.Reader, inputInfo types.BlobInfo
if err := blobFile.Sync(); err != nil {
return types.BlobInfo{}, err
}
- if err := blobFile.Chmod(0644); err != nil {
- return types.BlobInfo{}, err
+
+ // On POSIX systems, blobFile was created with mode 0600, so we need to make it readable.
+ // On Windows, the “permissions of newly created files” argument to syscall.Open is
+ // ignored and the file is already readable; besides, blobFile.Chmod, i.e. syscall.Fchmod,
+ // always fails on Windows.
+ if runtime.GOOS != "windows" {
+ if err := blobFile.Chmod(0644); err != nil {
+ return types.BlobInfo{}, err
+ }
}
- blobPath, err := d.ref.blobPath(computedDigest)
+ blobPath, err := d.ref.blobPath(computedDigest, d.sharedBlobDir)
if err != nil {
return types.BlobInfo{}, err
}
if err := ensureParentDirectoryExists(blobPath); err != nil {
return types.BlobInfo{}, err
}
+
+ // need to explicitly close the file, since a rename won't otherwise work on Windows
+ blobFile.Close()
+ explicitClosed = true
if err := os.Rename(blobFile.Name(), blobPath); err != nil {
return types.BlobInfo{}, err
}
@@ -147,7 +173,7 @@ func (d *ociImageDestination) HasBlob(info types.BlobInfo) (bool, int64, error)
if info.Digest == "" {
return false, -1, errors.Errorf(`"Can not check for a blob with unknown digest`)
}
- blobPath, err := d.ref.blobPath(info.Digest)
+ blobPath, err := d.ref.blobPath(info.Digest, d.sharedBlobDir)
if err != nil {
return false, -1, err
}
@@ -180,7 +206,7 @@ func (d *ociImageDestination) PutManifest(m []byte) error {
desc.MediaType = imgspecv1.MediaTypeImageManifest
desc.Size = int64(len(m))
- blobPath, err := d.ref.blobPath(digest)
+ blobPath, err := d.ref.blobPath(digest, d.sharedBlobDir)
if err != nil {
return err
}
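
The PutBlob changes above amount to a staged, atomic write: hash while copying into a temp file, sync, chmod on POSIX only, close explicitly, then rename into the content-addressed path. A stdlib-plus-go-digest sketch of the same pattern outside the ImageDestination interface; the directory layout and helper name are illustrative:

package main

import (
	"fmt"
	"io"
	"io/ioutil"
	"os"
	"path/filepath"
	"runtime"
	"strings"

	"github.com/opencontainers/go-digest"
)

// writeBlob stages data in a temp file and renames it to its digest-named
// path: digest while copying, sync, chmod on POSIX, explicit close before
// rename so the rename also works on Windows.
func writeBlob(dir string, stream io.Reader) (digest.Digest, error) {
	tmp, err := ioutil.TempFile(dir, "put-blob")
	if err != nil {
		return "", err
	}
	succeeded := false
	closed := false
	defer func() {
		if !closed {
			tmp.Close()
		}
		if !succeeded {
			os.Remove(tmp.Name())
		}
	}()

	digester := digest.Canonical.Digester()
	if _, err := io.Copy(tmp, io.TeeReader(stream, digester.Hash())); err != nil {
		return "", err
	}
	if err := tmp.Sync(); err != nil {
		return "", err
	}
	if runtime.GOOS != "windows" {
		if err := tmp.Chmod(0644); err != nil {
			return "", err
		}
	}
	tmp.Close()
	closed = true

	d := digester.Digest()
	target := filepath.Join(dir, d.Algorithm().String(), d.Hex())
	if err := os.MkdirAll(filepath.Dir(target), 0755); err != nil {
		return "", err
	}
	if err := os.Rename(tmp.Name(), target); err != nil {
		return "", err
	}
	succeeded = true
	return d, nil
}

func main() {
	dir, _ := ioutil.TempDir("", "blobs")
	defer os.RemoveAll(dir)
	d, err := writeBlob(dir, strings.NewReader("hello"))
	fmt.Println(d, err)
}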
diff --git a/vendor/github.com/containers/image/oci/layout/oci_src.go b/vendor/github.com/containers/image/oci/layout/oci_src.go
index 67f0c3b82..f0dac34e5 100644
--- a/vendor/github.com/containers/image/oci/layout/oci_src.go
+++ b/vendor/github.com/containers/image/oci/layout/oci_src.go
@@ -17,9 +17,10 @@ import (
)
type ociImageSource struct {
- ref ociReference
- descriptor imgspecv1.Descriptor
- client *http.Client
+ ref ociReference
+ descriptor imgspecv1.Descriptor
+ client *http.Client
+ sharedBlobDir string
}
// newImageSource returns an ImageSource for reading from an existing directory.
@@ -40,7 +41,12 @@ func newImageSource(ctx *types.SystemContext, ref ociReference) (types.ImageSour
if err != nil {
return nil, err
}
- return &ociImageSource{ref: ref, descriptor: descriptor, client: client}, nil
+ d := &ociImageSource{ref: ref, descriptor: descriptor, client: client}
+ if ctx != nil {
+ // TODO(jonboulle): check dir existence?
+ d.sharedBlobDir = ctx.OCISharedBlobDirPath
+ }
+ return d, nil
}
// Reference returns the reference used to set up this source.
@@ -55,35 +61,35 @@ func (s *ociImageSource) Close() error {
// GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available).
// It may use a remote (= slow) service.
-func (s *ociImageSource) GetManifest() ([]byte, string, error) {
- manifestPath, err := s.ref.blobPath(digest.Digest(s.descriptor.Digest))
- if err != nil {
- return nil, "", err
- }
- m, err := ioutil.ReadFile(manifestPath)
- if err != nil {
- return nil, "", err
+// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve (when the primary manifest is a manifest list);
+// this never happens if the primary manifest is not a manifest list (e.g. if the source never returns manifest lists).
+func (s *ociImageSource) GetManifest(instanceDigest *digest.Digest) ([]byte, string, error) {
+ var dig digest.Digest
+ var mimeType string
+ if instanceDigest == nil {
+ dig = digest.Digest(s.descriptor.Digest)
+ mimeType = s.descriptor.MediaType
+ } else {
+ dig = *instanceDigest
+ // XXX: instanceDigest means that we don't immediately have the context of what
+ // mediaType the manifest has. In OCI this means that we don't know
+ // what reference it came from, so we just *assume* that it's
+ // MediaTypeImageManifest.
+ // FIXME: We should actually be able to look up the manifest in the index,
+ // and see the MIME type there.
+ mimeType = imgspecv1.MediaTypeImageManifest
}
- return m, s.descriptor.MediaType, nil
-}
-
-func (s *ociImageSource) GetTargetManifest(digest digest.Digest) ([]byte, string, error) {
- manifestPath, err := s.ref.blobPath(digest)
+ manifestPath, err := s.ref.blobPath(dig, s.sharedBlobDir)
if err != nil {
return nil, "", err
}
-
m, err := ioutil.ReadFile(manifestPath)
if err != nil {
return nil, "", err
}
- // XXX: GetTargetManifest means that we don't have the context of what
- // mediaType the manifest has. In OCI this means that we don't know
- // what reference it came from, so we just *assume* that it's
- // MediaTypeImageManifest.
- return m, imgspecv1.MediaTypeImageManifest, nil
+ return m, mimeType, nil
}
// GetBlob returns a stream for the specified blob, and the blob's size.
@@ -92,7 +98,7 @@ func (s *ociImageSource) GetBlob(info types.BlobInfo) (io.ReadCloser, int64, err
return s.getExternalBlob(info.URLs)
}
- path, err := s.ref.blobPath(info.Digest)
+ path, err := s.ref.blobPath(info.Digest, s.sharedBlobDir)
if err != nil {
return nil, 0, err
}
@@ -108,7 +114,11 @@ func (s *ociImageSource) GetBlob(info types.BlobInfo) (io.ReadCloser, int64, err
return r, fi.Size(), nil
}
-func (s *ociImageSource) GetSignatures(context.Context) ([][]byte, error) {
+// GetSignatures returns the image's signatures. It may use a remote (= slow) service.
+// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve signatures for
+// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list
+// (e.g. if the source never returns manifest lists).
+func (s *ociImageSource) GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) {
return [][]byte{}, nil
}
@@ -133,11 +143,6 @@ func (s *ociImageSource) getExternalBlob(urls []string) (io.ReadCloser, int64, e
return nil, 0, errWrap
}
-// UpdatedLayerInfos() returns updated layer info that should be used when reading, in preference to values in the manifest, if specified.
-func (s *ociImageSource) UpdatedLayerInfos() []types.BlobInfo {
- return nil
-}
-
func getBlobSize(resp *http.Response) int64 {
size, err := strconv.ParseInt(resp.Header.Get("Content-Length"), 10, 64)
if err != nil {
diff --git a/vendor/github.com/containers/image/oci/layout/oci_transport.go b/vendor/github.com/containers/image/oci/layout/oci_transport.go
index 77730f390..c181c4c77 100644
--- a/vendor/github.com/containers/image/oci/layout/oci_transport.go
+++ b/vendor/github.com/containers/image/oci/layout/oci_transport.go
@@ -5,12 +5,12 @@ import (
"fmt"
"os"
"path/filepath"
- "regexp"
"strings"
"github.com/containers/image/directory/explicitfilepath"
"github.com/containers/image/docker/reference"
"github.com/containers/image/image"
+ "github.com/containers/image/oci/internal"
"github.com/containers/image/transports"
"github.com/containers/image/types"
"github.com/opencontainers/go-digest"
@@ -36,45 +36,12 @@ func (t ociTransport) ParseReference(reference string) (types.ImageReference, er
return ParseReference(reference)
}
-// annotation spex from https://github.com/opencontainers/image-spec/blob/master/annotations.md#pre-defined-annotation-keys
-const (
- separator = `(?:[-._:@+]|--)`
- alphanum = `(?:[A-Za-z0-9]+)`
- component = `(?:` + alphanum + `(?:` + separator + alphanum + `)*)`
-)
-
-var refRegexp = regexp.MustCompile(`^` + component + `(?:/` + component + `)*$`)
-
// ValidatePolicyConfigurationScope checks that scope is a valid name for signature.PolicyTransportScopes keys
// (i.e. a valid PolicyConfigurationIdentity() or PolicyConfigurationNamespaces() return value).
// It is acceptable to allow an invalid value which will never be matched, it can "only" cause user confusion.
// scope passed to this function will not be "", that value is always allowed.
func (t ociTransport) ValidatePolicyConfigurationScope(scope string) error {
- var dir string
- sep := strings.SplitN(scope, ":", 2)
- dir = sep[0]
-
- if len(sep) == 2 {
- image := sep[1]
- if !refRegexp.MatchString(image) {
- return errors.Errorf("Invalid image %s", image)
- }
- }
-
- if !strings.HasPrefix(dir, "/") {
- return errors.Errorf("Invalid scope %s: must be an absolute path", scope)
- }
- // Refuse also "/", otherwise "/" and "" would have the same semantics,
- // and "" could be unexpectedly shadowed by the "/" entry.
- // (Note: we do allow "/:someimage", a bit ridiculous but why refuse it?)
- if scope == "/" {
- return errors.New(`Invalid scope "/": Use the generic default scope ""`)
- }
- cleaned := filepath.Clean(dir)
- if cleaned != dir {
- return errors.Errorf(`Invalid scope %s: Uses non-canonical path format, perhaps try with path %s`, scope, cleaned)
- }
- return nil
+ return internal.ValidateScope(scope)
}
// ociReference is an ImageReference for OCI directory paths.
@@ -92,13 +59,7 @@ type ociReference struct {
// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an OCI ImageReference.
func ParseReference(reference string) (types.ImageReference, error) {
- var dir, image string
- sep := strings.SplitN(reference, ":", 2)
- dir = sep[0]
-
- if len(sep) == 2 {
- image = sep[1]
- }
+ dir, image := internal.SplitPathAndImage(reference)
return NewReference(dir, image)
}
@@ -111,14 +72,15 @@ func NewReference(dir, image string) (types.ImageReference, error) {
if err != nil {
return nil, err
}
- // This is necessary to prevent directory paths returned by PolicyConfigurationNamespaces
- // from being ambiguous with values of PolicyConfigurationIdentity.
- if strings.Contains(resolved, ":") {
- return nil, errors.Errorf("Invalid OCI reference %s:%s: path %s contains a colon", dir, image, resolved)
+
+ if err := internal.ValidateOCIPath(dir); err != nil {
+ return nil, err
}
- if len(image) > 0 && !refRegexp.MatchString(image) {
- return nil, errors.Errorf("Invalid image %s", image)
+
+ if err = internal.ValidateImageName(image); err != nil {
+ return nil, err
}
+
return ociReference{dir: dir, resolvedDir: resolved, image: image}, nil
}
@@ -177,16 +139,17 @@ func (ref ociReference) PolicyConfigurationNamespaces() []string {
return res
}
-// NewImage returns a types.Image for this reference, possibly specialized for this ImageTransport.
-// The caller must call .Close() on the returned Image.
+// NewImage returns a types.ImageCloser for this reference, possibly specialized for this ImageTransport.
+// The caller must call .Close() on the returned ImageCloser.
// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource,
// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage.
-func (ref ociReference) NewImage(ctx *types.SystemContext) (types.Image, error) {
+// WARNING: This may not do the right thing for a manifest list, see image.FromSource for details.
+func (ref ociReference) NewImage(ctx *types.SystemContext) (types.ImageCloser, error) {
src, err := newImageSource(ctx, ref)
if err != nil {
return nil, err
}
- return image.FromSource(src)
+ return image.FromSource(ctx, src)
}
// getIndex returns a pointer to the index referenced by this ociReference. If an error occurs opening an index, nil is returned together
@@ -261,7 +224,7 @@ func (ref ociReference) NewImageSource(ctx *types.SystemContext) (types.ImageSou
// NewImageDestination returns a types.ImageDestination for this reference.
// The caller must call .Close() on the returned ImageDestination.
func (ref ociReference) NewImageDestination(ctx *types.SystemContext) (types.ImageDestination, error) {
- return newImageDestination(ref)
+ return newImageDestination(ctx, ref)
}
// DeleteImage deletes the named image from the registry, if supported.
@@ -280,9 +243,13 @@ func (ref ociReference) indexPath() string {
}
// blobPath returns a path for a blob within a directory using OCI image-layout conventions.
-func (ref ociReference) blobPath(digest digest.Digest) (string, error) {
+func (ref ociReference) blobPath(digest digest.Digest, sharedBlobDir string) (string, error) {
if err := digest.Validate(); err != nil {
return "", errors.Wrapf(err, "unexpected digest reference %s", digest)
}
- return filepath.Join(ref.dir, "blobs", digest.Algorithm().String(), digest.Hex()), nil
+ blobDir := filepath.Join(ref.dir, "blobs")
+ if sharedBlobDir != "" {
+ blobDir = sharedBlobDir
+ }
+ return filepath.Join(blobDir, digest.Algorithm().String(), digest.Hex()), nil
}
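
The sharedBlobDir override added to blobPath above is small but easy to misread. A minimal sketch of the resolution order, assuming only the standard library and go-digest; the helper name resolveBlobPath and the standalone package are illustrative and not part of this package, and the sharedBlobDir value stands in for s.sharedBlobDir, which is populated from the caller's SystemContext elsewhere in this change:

package ocipaths

import (
	"path/filepath"

	digest "github.com/opencontainers/go-digest"
)

// resolveBlobPath mirrors blobPath above: blobs normally live under
// <layout dir>/blobs/<algorithm>/<hex>, but a non-empty shared blob
// directory takes precedence over the per-layout "blobs" directory.
func resolveBlobPath(layoutDir, sharedBlobDir string, d digest.Digest) (string, error) {
	if err := d.Validate(); err != nil {
		return "", err
	}
	blobDir := filepath.Join(layoutDir, "blobs")
	if sharedBlobDir != "" {
		blobDir = sharedBlobDir
	}
	return filepath.Join(blobDir, d.Algorithm().String(), d.Hex()), nil
}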
diff --git a/vendor/github.com/containers/image/openshift/openshift.go b/vendor/github.com/containers/image/openshift/openshift.go
index 794521b14..33d1a2bf2 100644
--- a/vendor/github.com/containers/image/openshift/openshift.go
+++ b/vendor/github.com/containers/image/openshift/openshift.go
@@ -200,20 +200,15 @@ func (s *openshiftImageSource) Close() error {
return nil
}
-func (s *openshiftImageSource) GetTargetManifest(digest digest.Digest) ([]byte, string, error) {
- if err := s.ensureImageIsResolved(context.TODO()); err != nil {
- return nil, "", err
- }
- return s.docker.GetTargetManifest(digest)
-}
-
// GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available).
// It may use a remote (= slow) service.
-func (s *openshiftImageSource) GetManifest() ([]byte, string, error) {
+// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve (when the primary manifest is a manifest list);
+// this never happens if the primary manifest is not a manifest list (e.g. if the source never returns manifest lists).
+func (s *openshiftImageSource) GetManifest(instanceDigest *digest.Digest) ([]byte, string, error) {
if err := s.ensureImageIsResolved(context.TODO()); err != nil {
return nil, "", err
}
- return s.docker.GetManifest()
+ return s.docker.GetManifest(instanceDigest)
}
// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown).
@@ -224,12 +219,21 @@ func (s *openshiftImageSource) GetBlob(info types.BlobInfo) (io.ReadCloser, int6
return s.docker.GetBlob(info)
}
-func (s *openshiftImageSource) GetSignatures(ctx context.Context) ([][]byte, error) {
- if err := s.ensureImageIsResolved(ctx); err != nil {
- return nil, err
+// GetSignatures returns the image's signatures. It may use a remote (= slow) service.
+// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve signatures for
+// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list
+// (e.g. if the source never returns manifest lists).
+func (s *openshiftImageSource) GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) {
+ var imageName string
+ if instanceDigest == nil {
+ if err := s.ensureImageIsResolved(ctx); err != nil {
+ return nil, err
+ }
+ imageName = s.imageStreamImageName
+ } else {
+ imageName = instanceDigest.String()
}
-
- image, err := s.client.getImage(ctx, s.imageStreamImageName)
+ image, err := s.client.getImage(ctx, imageName)
if err != nil {
return nil, err
}
@@ -242,10 +246,6 @@ func (s *openshiftImageSource) GetSignatures(ctx context.Context) ([][]byte, err
return sigs, nil
}
-func (s *openshiftImageSource) UpdatedLayerInfos() []types.BlobInfo {
- return nil
-}
-
// ensureImageIsResolved sets up s.docker and s.imageStreamImageName
func (s *openshiftImageSource) ensureImageIsResolved(ctx context.Context) error {
if s.docker != nil {
diff --git a/vendor/github.com/containers/image/openshift/openshift_transport.go b/vendor/github.com/containers/image/openshift/openshift_transport.go
index 7db35d96e..686d806f7 100644
--- a/vendor/github.com/containers/image/openshift/openshift_transport.go
+++ b/vendor/github.com/containers/image/openshift/openshift_transport.go
@@ -125,16 +125,17 @@ func (ref openshiftReference) PolicyConfigurationNamespaces() []string {
return policyconfiguration.DockerReferenceNamespaces(ref.dockerReference)
}
-// NewImage returns a types.Image for this reference, possibly specialized for this ImageTransport.
-// The caller must call .Close() on the returned Image.
+// NewImage returns a types.ImageCloser for this reference, possibly specialized for this ImageTransport.
+// The caller must call .Close() on the returned ImageCloser.
// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource,
// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage.
-func (ref openshiftReference) NewImage(ctx *types.SystemContext) (types.Image, error) {
+// WARNING: This may not do the right thing for a manifest list, see image.FromSource for details.
+func (ref openshiftReference) NewImage(ctx *types.SystemContext) (types.ImageCloser, error) {
src, err := newImageSource(ctx, ref)
if err != nil {
return nil, err
}
- return genericImage.FromSource(src)
+ return genericImage.FromSource(ctx, src)
}
// NewImageSource returns a types.ImageSource for this reference.
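
Several transports in this change switch NewImage to return types.ImageCloser, so the caller now owns the Close() call. A minimal caller-side sketch of that contract, assuming the alltransports helper (not part of this diff) and an illustrative reference string; per the WARNING above, this path may not do the right thing for manifest lists:

package main

import (
	"fmt"
	"log"

	"github.com/containers/image/transports/alltransports"
	"github.com/containers/image/types"
)

func main() {
	// Illustrative reference; any supported transport prefix works here.
	ref, err := alltransports.ParseImageName("docker://docker.io/library/busybox:latest")
	if err != nil {
		log.Fatal(err)
	}
	var sysCtx *types.SystemContext // nil uses defaults
	img, err := ref.NewImage(sysCtx) // now returns a types.ImageCloser
	if err != nil {
		log.Fatal(err)
	}
	defer img.Close() // the caller must Close() the returned ImageCloser
	manifestBytes, mimeType, err := img.Manifest()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("manifest: %d bytes, MIME type %s\n", len(manifestBytes), mimeType)
}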
diff --git a/vendor/github.com/containers/image/ostree/ostree_dest.go b/vendor/github.com/containers/image/ostree/ostree_dest.go
index 26137431d..704e1ecee 100644
--- a/vendor/github.com/containers/image/ostree/ostree_dest.go
+++ b/vendor/github.com/containers/image/ostree/ostree_dest.go
@@ -4,6 +4,8 @@ package ostree
import (
"bytes"
+ "compress/gzip"
+ "encoding/base64"
"encoding/json"
"fmt"
"io"
@@ -12,18 +14,27 @@ import (
"os/exec"
"path/filepath"
"strconv"
- "strings"
"time"
"github.com/containers/image/manifest"
"github.com/containers/image/types"
"github.com/containers/storage/pkg/archive"
"github.com/opencontainers/go-digest"
- "github.com/pkg/errors"
-
"github.com/ostreedev/ostree-go/pkg/otbuiltin"
+ "github.com/pkg/errors"
+ "github.com/vbatts/tar-split/tar/asm"
+ "github.com/vbatts/tar-split/tar/storage"
)
+// #cgo pkg-config: glib-2.0 gobject-2.0 ostree-1
+// #include <glib.h>
+// #include <glib-object.h>
+// #include <gio/gio.h>
+// #include <stdlib.h>
+// #include <ostree.h>
+// #include <gio/ginputstream.h>
+import "C"
+
type blobToImport struct {
Size int64
Digest digest.Digest
@@ -35,18 +46,24 @@ type descriptor struct {
Digest digest.Digest `json:"digest"`
}
+type fsLayersSchema1 struct {
+ BlobSum digest.Digest `json:"blobSum"`
+}
+
type manifestSchema struct {
- ConfigDescriptor descriptor `json:"config"`
- LayersDescriptors []descriptor `json:"layers"`
+ LayersDescriptors []descriptor `json:"layers"`
+ FSLayers []fsLayersSchema1 `json:"fsLayers"`
}
type ostreeImageDestination struct {
- ref ostreeReference
- manifest string
- schema manifestSchema
- tmpDirPath string
- blobs map[string]*blobToImport
- digest digest.Digest
+ ref ostreeReference
+ manifest string
+ schema manifestSchema
+ tmpDirPath string
+ blobs map[string]*blobToImport
+ digest digest.Digest
+ signaturesLen int
+ repo *C.struct_OstreeRepo
}
// newImageDestination returns an ImageDestination for writing to an existing ostree.
@@ -55,7 +72,7 @@ func newImageDestination(ref ostreeReference, tmpDirPath string) (types.ImageDes
if err := ensureDirectoryExists(tmpDirPath); err != nil {
return nil, err
}
- return &ostreeImageDestination{ref, "", manifestSchema{}, tmpDirPath, map[string]*blobToImport{}, ""}, nil
+ return &ostreeImageDestination{ref, "", manifestSchema{}, tmpDirPath, map[string]*blobToImport{}, "", 0, nil}, nil
}
// Reference returns the reference used to set up this destination. Note that this should directly correspond to user's intent,
@@ -66,6 +83,9 @@ func (d *ostreeImageDestination) Reference() types.ImageReference {
// Close removes resources associated with an initialized ImageDestination, if any.
func (d *ostreeImageDestination) Close() error {
+ if d.repo != nil {
+ C.g_object_unref(C.gpointer(d.repo))
+ }
return os.RemoveAll(d.tmpDirPath)
}
@@ -174,6 +194,35 @@ func (d *ostreeImageDestination) ostreeCommit(repo *otbuiltin.Repo, branch strin
return err
}
+func generateTarSplitMetadata(output *bytes.Buffer, file string) error {
+ mfz := gzip.NewWriter(output)
+ defer mfz.Close()
+ metaPacker := storage.NewJSONPacker(mfz)
+
+ stream, err := os.OpenFile(file, os.O_RDONLY, 0)
+ if err != nil {
+ return err
+ }
+ defer stream.Close()
+
+ gzReader, err := gzip.NewReader(stream)
+ if err != nil {
+ return err
+ }
+ defer gzReader.Close()
+
+ its, err := asm.NewInputTarStream(gzReader, metaPacker, nil)
+ if err != nil {
+ return err
+ }
+
+ _, err = io.Copy(ioutil.Discard, its)
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
func (d *ostreeImageDestination) importBlob(repo *otbuiltin.Repo, blob *blobToImport) error {
ostreeBranch := fmt.Sprintf("ociimage/%s", blob.Digest.Hex())
destinationPath := filepath.Join(d.tmpDirPath, blob.Digest.Hex(), "root")
@@ -185,6 +234,11 @@ func (d *ostreeImageDestination) importBlob(repo *otbuiltin.Repo, blob *blobToIm
os.RemoveAll(destinationPath)
}()
+ var tarSplitOutput bytes.Buffer
+ if err := generateTarSplitMetadata(&tarSplitOutput, blob.BlobPath); err != nil {
+ return err
+ }
+
if os.Getuid() == 0 {
if err := archive.UntarPath(blob.BlobPath, destinationPath); err != nil {
return err
@@ -202,28 +256,35 @@ func (d *ostreeImageDestination) importBlob(repo *otbuiltin.Repo, blob *blobToIm
return err
}
}
- return d.ostreeCommit(repo, ostreeBranch, destinationPath, []string{fmt.Sprintf("docker.size=%d", blob.Size)})
+ return d.ostreeCommit(repo, ostreeBranch, destinationPath, []string{fmt.Sprintf("docker.size=%d", blob.Size),
+ fmt.Sprintf("tarsplit.output=%s", base64.StdEncoding.EncodeToString(tarSplitOutput.Bytes()))})
+
}
-func (d *ostreeImageDestination) importConfig(blob *blobToImport) error {
+func (d *ostreeImageDestination) importConfig(repo *otbuiltin.Repo, blob *blobToImport) error {
ostreeBranch := fmt.Sprintf("ociimage/%s", blob.Digest.Hex())
+ destinationPath := filepath.Dir(blob.BlobPath)
- return exec.Command("ostree", "commit",
- "--repo", d.ref.repo,
- fmt.Sprintf("--add-metadata-string=docker.size=%d", blob.Size),
- "--branch", ostreeBranch, filepath.Dir(blob.BlobPath)).Run()
+ return d.ostreeCommit(repo, ostreeBranch, destinationPath, []string{fmt.Sprintf("docker.size=%d", blob.Size)})
}
func (d *ostreeImageDestination) HasBlob(info types.BlobInfo) (bool, int64, error) {
- branch := fmt.Sprintf("ociimage/%s", info.Digest.Hex())
- output, err := exec.Command("ostree", "show", "--repo", d.ref.repo, "--print-metadata-key=docker.size", branch).CombinedOutput()
- if err != nil {
- if bytes.Index(output, []byte("not found")) >= 0 || bytes.Index(output, []byte("No such")) >= 0 {
- return false, -1, nil
+
+ if d.repo == nil {
+ repo, err := openRepo(d.ref.repo)
+ if err != nil {
+ return false, 0, err
}
- return false, -1, err
+ d.repo = repo
+ }
+ branch := fmt.Sprintf("ociimage/%s", info.Digest.Hex())
+
+ found, data, err := readMetadata(d.repo, branch, "docker.size")
+ if err != nil || !found {
+ return found, -1, err
}
- size, err := strconv.ParseInt(strings.Trim(string(output), "'\n"), 10, 64)
+
+ size, err := strconv.ParseInt(data, 10, 64)
if err != nil {
return false, -1, err
}
@@ -272,6 +333,7 @@ func (d *ostreeImageDestination) PutSignatures(signatures [][]byte) error {
return err
}
}
+ d.signaturesLen = len(signatures)
return nil
}
@@ -286,24 +348,37 @@ func (d *ostreeImageDestination) Commit() error {
return err
}
- for _, layer := range d.schema.LayersDescriptors {
- hash := layer.Digest.Hex()
+ checkLayer := func(hash string) error {
blob := d.blobs[hash]
// if the blob is not present in d.blobs then it is already stored in OSTree,
// and we don't need to import it.
if blob == nil {
- continue
+ return nil
}
err := d.importBlob(repo, blob)
if err != nil {
return err
}
+
+ delete(d.blobs, hash)
+ return nil
+ }
+ for _, layer := range d.schema.LayersDescriptors {
+ hash := layer.Digest.Hex()
+ if err = checkLayer(hash); err != nil {
+ return err
+ }
+ }
+ for _, layer := range d.schema.FSLayers {
+ hash := layer.BlobSum.Hex()
+ if err = checkLayer(hash); err != nil {
+ return err
+ }
}
- hash := d.schema.ConfigDescriptor.Digest.Hex()
- blob := d.blobs[hash]
- if blob != nil {
- err := d.importConfig(blob)
+ // Import the other blobs that are not layers
+ for _, blob := range d.blobs {
+ err := d.importConfig(repo, blob)
if err != nil {
return err
}
@@ -311,7 +386,9 @@ func (d *ostreeImageDestination) Commit() error {
manifestPath := filepath.Join(d.tmpDirPath, "manifest")
- metadata := []string{fmt.Sprintf("docker.manifest=%s", string(d.manifest)), fmt.Sprintf("docker.digest=%s", string(d.digest))}
+ metadata := []string{fmt.Sprintf("docker.manifest=%s", string(d.manifest)),
+ fmt.Sprintf("signatures=%d", d.signaturesLen),
+ fmt.Sprintf("docker.digest=%s", string(d.digest))}
err = d.ostreeCommit(repo, fmt.Sprintf("ociimage/%s", d.ref.branchName), manifestPath, metadata)
_, err = repo.CommitTransaction()
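
importBlob above now records tar-split metadata alongside each layer commit so the source side can later reproduce the original compressed layer tar. A minimal sketch of that producer path in isolation, using only the tar-split library; the function name tarSplitMetadataFor, the package name, and the blob path are illustrative, mirroring generateTarSplitMetadata above:

package ostreeexample

import (
	"bytes"
	"compress/gzip"
	"encoding/base64"
	"io"
	"io/ioutil"
	"os"

	"github.com/vbatts/tar-split/tar/asm"
	"github.com/vbatts/tar-split/tar/storage"
)

// tarSplitMetadataFor walks a gzip-compressed layer tarball, records its
// tar-split JSON stream (gzipped), and returns the base64 form that
// importBlob attaches to the layer's ostree commit as "tarsplit.output".
func tarSplitMetadataFor(layerBlobPath string) (string, error) {
	f, err := os.Open(layerBlobPath)
	if err != nil {
		return "", err
	}
	defer f.Close()
	gzReader, err := gzip.NewReader(f)
	if err != nil {
		return "", err
	}
	defer gzReader.Close()

	var buf bytes.Buffer
	mfz := gzip.NewWriter(&buf)
	packer := storage.NewJSONPacker(mfz)
	its, err := asm.NewInputTarStream(gzReader, packer, nil)
	if err != nil {
		return "", err
	}
	if _, err := io.Copy(ioutil.Discard, its); err != nil {
		return "", err
	}
	if err := mfz.Close(); err != nil { // flush the gzipped metadata before encoding
		return "", err
	}
	return base64.StdEncoding.EncodeToString(buf.Bytes()), nil
}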
diff --git a/vendor/github.com/containers/image/ostree/ostree_src.go b/vendor/github.com/containers/image/ostree/ostree_src.go
new file mode 100644
index 000000000..3012da1af
--- /dev/null
+++ b/vendor/github.com/containers/image/ostree/ostree_src.go
@@ -0,0 +1,349 @@
+// +build !containers_image_ostree_stub
+
+package ostree
+
+import (
+ "bytes"
+ "compress/gzip"
+ "context"
+ "encoding/base64"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "strconv"
+ "strings"
+ "unsafe"
+
+ "github.com/containers/image/manifest"
+ "github.com/containers/image/types"
+ "github.com/containers/storage/pkg/ioutils"
+ "github.com/opencontainers/go-digest"
+ glib "github.com/ostreedev/ostree-go/pkg/glibobject"
+ "github.com/pkg/errors"
+ "github.com/vbatts/tar-split/tar/asm"
+ "github.com/vbatts/tar-split/tar/storage"
+)
+
+// #cgo pkg-config: glib-2.0 gobject-2.0 ostree-1
+// #include <glib.h>
+// #include <glib-object.h>
+// #include <gio/gio.h>
+// #include <stdlib.h>
+// #include <ostree.h>
+// #include <gio/ginputstream.h>
+import "C"
+
+type ostreeImageSource struct {
+ ref ostreeReference
+ tmpDir string
+ repo *C.struct_OstreeRepo
+}
+
+// newImageSource returns an ImageSource for reading from an existing ostree repository.
+func newImageSource(ctx *types.SystemContext, tmpDir string, ref ostreeReference) (types.ImageSource, error) {
+ return &ostreeImageSource{ref: ref, tmpDir: tmpDir}, nil
+}
+
+// Reference returns the reference used to set up this source.
+func (s *ostreeImageSource) Reference() types.ImageReference {
+ return s.ref
+}
+
+// Close removes resources associated with an initialized ImageSource, if any.
+func (s *ostreeImageSource) Close() error {
+ if s.repo != nil {
+ C.g_object_unref(C.gpointer(s.repo))
+ }
+ return nil
+}
+
+func (s *ostreeImageSource) getLayerSize(blob string) (int64, error) {
+ b := fmt.Sprintf("ociimage/%s", blob)
+ found, data, err := readMetadata(s.repo, b, "docker.size")
+ if err != nil || !found {
+ return 0, err
+ }
+ return strconv.ParseInt(data, 10, 64)
+}
+
+func (s *ostreeImageSource) getLenSignatures() (int64, error) {
+ b := fmt.Sprintf("ociimage/%s", s.ref.branchName)
+ found, data, err := readMetadata(s.repo, b, "signatures")
+ if err != nil {
+ return -1, err
+ }
+ if !found {
+ // if 'signatures' is not present, just return 0 signatures.
+ return 0, nil
+ }
+ return strconv.ParseInt(data, 10, 64)
+}
+
+func (s *ostreeImageSource) getTarSplitData(blob string) ([]byte, error) {
+ b := fmt.Sprintf("ociimage/%s", blob)
+ found, out, err := readMetadata(s.repo, b, "tarsplit.output")
+ if err != nil || !found {
+ return nil, err
+ }
+ return base64.StdEncoding.DecodeString(out)
+}
+
+// GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available).
+// It may use a remote (= slow) service.
+func (s *ostreeImageSource) GetManifest(instanceDigest *digest.Digest) ([]byte, string, error) {
+ if instanceDigest != nil {
+ return nil, "", errors.Errorf(`Manifest lists are not supported by "ostree:"`)
+ }
+ if s.repo == nil {
+ repo, err := openRepo(s.ref.repo)
+ if err != nil {
+ return nil, "", err
+ }
+ s.repo = repo
+ }
+
+ b := fmt.Sprintf("ociimage/%s", s.ref.branchName)
+ found, out, err := readMetadata(s.repo, b, "docker.manifest")
+ if err != nil {
+ return nil, "", err
+ }
+ if !found {
+ return nil, "", errors.New("manifest not found")
+ }
+ m := []byte(out)
+ return m, manifest.GuessMIMEType(m), nil
+}
+
+func (s *ostreeImageSource) GetTargetManifest(digest digest.Digest) ([]byte, string, error) {
+ return nil, "", errors.New("manifest lists are not supported by this transport")
+}
+
+func openRepo(path string) (*C.struct_OstreeRepo, error) {
+ var cerr *C.GError
+ cpath := C.CString(path)
+ defer C.free(unsafe.Pointer(cpath))
+ pathc := C.g_file_new_for_path(cpath)
+ defer C.g_object_unref(C.gpointer(pathc))
+ repo := C.ostree_repo_new(pathc)
+ r := glib.GoBool(glib.GBoolean(C.ostree_repo_open(repo, nil, &cerr)))
+ if !r {
+ C.g_object_unref(C.gpointer(repo))
+ return nil, glib.ConvertGError(glib.ToGError(unsafe.Pointer(cerr)))
+ }
+ return repo, nil
+}
+
+type ostreePathFileGetter struct {
+ repo *C.struct_OstreeRepo
+ parentRoot *C.GFile
+}
+
+type ostreeReader struct {
+ stream *C.GFileInputStream
+}
+
+func (o ostreeReader) Close() error {
+ C.g_object_unref(C.gpointer(o.stream))
+ return nil
+}
+func (o ostreeReader) Read(p []byte) (int, error) {
+ var cerr *C.GError
+ instanceCast := C.g_type_check_instance_cast((*C.GTypeInstance)(unsafe.Pointer(o.stream)), C.g_input_stream_get_type())
+ stream := (*C.GInputStream)(unsafe.Pointer(instanceCast))
+
+ b := C.g_input_stream_read_bytes(stream, (C.gsize)(cap(p)), nil, &cerr)
+ if b == nil {
+ return 0, glib.ConvertGError(glib.ToGError(unsafe.Pointer(cerr)))
+ }
+ defer C.g_bytes_unref(b)
+
+ count := int(C.g_bytes_get_size(b))
+ if count == 0 {
+ return 0, io.EOF
+ }
+ data := (*[1 << 30]byte)(unsafe.Pointer(C.g_bytes_get_data(b, nil)))[:count:count]
+ copy(p, data)
+ return count, nil
+}
+
+func readMetadata(repo *C.struct_OstreeRepo, commit, key string) (bool, string, error) {
+ var cerr *C.GError
+ var ref *C.char
+ defer C.free(unsafe.Pointer(ref))
+
+ cCommit := C.CString(commit)
+ defer C.free(unsafe.Pointer(cCommit))
+
+ if !glib.GoBool(glib.GBoolean(C.ostree_repo_resolve_rev(repo, cCommit, C.gboolean(1), &ref, &cerr))) {
+ return false, "", glib.ConvertGError(glib.ToGError(unsafe.Pointer(cerr)))
+ }
+
+ if ref == nil {
+ return false, "", nil
+ }
+
+ var variant *C.GVariant
+ if !glib.GoBool(glib.GBoolean(C.ostree_repo_load_variant(repo, C.OSTREE_OBJECT_TYPE_COMMIT, ref, &variant, &cerr))) {
+ return false, "", glib.ConvertGError(glib.ToGError(unsafe.Pointer(cerr)))
+ }
+ defer C.g_variant_unref(variant)
+ if variant != nil {
+ cKey := C.CString(key)
+ defer C.free(unsafe.Pointer(cKey))
+
+ metadata := C.g_variant_get_child_value(variant, 0)
+ defer C.g_variant_unref(metadata)
+
+ data := C.g_variant_lookup_value(metadata, (*C.gchar)(cKey), nil)
+ if data != nil {
+ defer C.g_variant_unref(data)
+ ptr := (*C.char)(C.g_variant_get_string(data, nil))
+ val := C.GoString(ptr)
+ return true, val, nil
+ }
+ }
+ return false, "", nil
+}
+
+func newOSTreePathFileGetter(repo *C.struct_OstreeRepo, commit string) (*ostreePathFileGetter, error) {
+ var cerr *C.GError
+ var parentRoot *C.GFile
+ cCommit := C.CString(commit)
+ defer C.free(unsafe.Pointer(cCommit))
+ if !glib.GoBool(glib.GBoolean(C.ostree_repo_read_commit(repo, cCommit, &parentRoot, nil, nil, &cerr))) {
+ return &ostreePathFileGetter{}, glib.ConvertGError(glib.ToGError(unsafe.Pointer(cerr)))
+ }
+
+ C.g_object_ref(C.gpointer(repo))
+
+ return &ostreePathFileGetter{repo: repo, parentRoot: parentRoot}, nil
+}
+
+func (o ostreePathFileGetter) Get(filename string) (io.ReadCloser, error) {
+ var file *C.GFile
+ if strings.HasPrefix(filename, "./") {
+ filename = filename[2:]
+ }
+ cfilename := C.CString(filename)
+ defer C.free(unsafe.Pointer(cfilename))
+
+ file = (*C.GFile)(C.g_file_resolve_relative_path(o.parentRoot, cfilename))
+
+ var cerr *C.GError
+ stream := C.g_file_read(file, nil, &cerr)
+ if stream == nil {
+ return nil, glib.ConvertGError(glib.ToGError(unsafe.Pointer(cerr)))
+ }
+
+ return &ostreeReader{stream: stream}, nil
+}
+
+func (o ostreePathFileGetter) Close() {
+ C.g_object_unref(C.gpointer(o.repo))
+ C.g_object_unref(C.gpointer(o.parentRoot))
+}
+
+func (s *ostreeImageSource) readSingleFile(commit, path string) (io.ReadCloser, error) {
+ getter, err := newOSTreePathFileGetter(s.repo, commit)
+ if err != nil {
+ return nil, err
+ }
+ defer getter.Close()
+
+ return getter.Get(path)
+}
+
+// GetBlob returns a stream for the specified blob, and the blob's size.
+func (s *ostreeImageSource) GetBlob(info types.BlobInfo) (io.ReadCloser, int64, error) {
+ blob := info.Digest.Hex()
+ branch := fmt.Sprintf("ociimage/%s", blob)
+
+ if s.repo == nil {
+ repo, err := openRepo(s.ref.repo)
+ if err != nil {
+ return nil, 0, err
+ }
+ s.repo = repo
+ }
+
+ layerSize, err := s.getLayerSize(blob)
+ if err != nil {
+ return nil, 0, err
+ }
+
+ tarsplit, err := s.getTarSplitData(blob)
+ if err != nil {
+ return nil, 0, err
+ }
+
+ // If tarsplit is nil we are looking at the manifest; return the file in /content directly.
+ if tarsplit == nil {
+ file, err := s.readSingleFile(branch, "/content")
+ if err != nil {
+ return nil, 0, err
+ }
+ return file, layerSize, nil
+ }
+
+ mf := bytes.NewReader(tarsplit)
+ mfz, err := gzip.NewReader(mf)
+ if err != nil {
+ return nil, 0, err
+ }
+ defer mfz.Close()
+ metaUnpacker := storage.NewJSONUnpacker(mfz)
+
+ getter, err := newOSTreePathFileGetter(s.repo, branch)
+ if err != nil {
+ return nil, 0, err
+ }
+
+ ots := asm.NewOutputTarStream(getter, metaUnpacker)
+
+ pipeReader, pipeWriter := io.Pipe()
+ go func() {
+ io.Copy(pipeWriter, ots)
+ pipeWriter.Close()
+ }()
+
+ rc := ioutils.NewReadCloserWrapper(pipeReader, func() error {
+ getter.Close()
+ return ots.Close()
+ })
+ return rc, layerSize, nil
+}
+
+func (s *ostreeImageSource) GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) {
+ if instanceDigest != nil {
+ return nil, errors.New("manifest lists are not supported by this transport")
+ }
+ lenSignatures, err := s.getLenSignatures()
+ if err != nil {
+ return nil, err
+ }
+ branch := fmt.Sprintf("ociimage/%s", s.ref.branchName)
+
+ if s.repo == nil {
+ repo, err := openRepo(s.ref.repo)
+ if err != nil {
+ return nil, err
+ }
+ s.repo = repo
+ }
+
+ signatures := [][]byte{}
+ for i := int64(1); i <= lenSignatures; i++ {
+ sigReader, err := s.readSingleFile(branch, fmt.Sprintf("/signature-%d", i))
+ if err != nil {
+ return nil, err
+ }
+ defer sigReader.Close()
+
+ sig, err := ioutil.ReadAll(sigReader)
+ if err != nil {
+ return nil, err
+ }
+ signatures = append(signatures, sig)
+ }
+ return signatures, nil
+}
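
GetBlob above reconstructs the original compressed layer by feeding the stored tar-split metadata back through tar-split's assembler, with an ostree checkout acting as the file source. A minimal sketch of the same reassembly against a plain directory, assuming only the tar-split library; reassembleLayer and the path-based getter are illustrative stand-ins for the cgo-backed ostreePathFileGetter:

package ostreeexample

import (
	"bytes"
	"compress/gzip"
	"io"

	"github.com/vbatts/tar-split/tar/asm"
	"github.com/vbatts/tar-split/tar/storage"
)

// reassembleLayer rebuilds a layer tarball from its gzipped tar-split JSON
// stream plus the extracted file contents rooted at contentRoot, mirroring
// the unpacker/assembler pipeline used in GetBlob above.
func reassembleLayer(tarSplitGz []byte, contentRoot string, out io.Writer) error {
	mfz, err := gzip.NewReader(bytes.NewReader(tarSplitGz))
	if err != nil {
		return err
	}
	defer mfz.Close()

	unpacker := storage.NewJSONUnpacker(mfz)
	getter := storage.NewPathFileGetter(contentRoot) // file bodies come from a checked-out tree
	ots := asm.NewOutputTarStream(getter, unpacker)
	defer ots.Close()

	_, err = io.Copy(out, ots)
	return err
}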
diff --git a/vendor/github.com/containers/image/ostree/ostree_transport.go b/vendor/github.com/containers/image/ostree/ostree_transport.go
index 0de74a71d..cc85a43ff 100644
--- a/vendor/github.com/containers/image/ostree/ostree_transport.go
+++ b/vendor/github.com/containers/image/ostree/ostree_transport.go
@@ -10,12 +10,12 @@ import (
"regexp"
"strings"
- "github.com/pkg/errors"
-
"github.com/containers/image/directory/explicitfilepath"
"github.com/containers/image/docker/reference"
+ "github.com/containers/image/image"
"github.com/containers/image/transports"
"github.com/containers/image/types"
+ "github.com/pkg/errors"
)
const defaultOSTreeRepo = "/ostree/repo"
@@ -66,6 +66,11 @@ type ostreeReference struct {
repo string
}
+type ostreeImageCloser struct {
+ types.ImageCloser
+ size int64
+}
+
func (t ostreeTransport) ParseReference(ref string) (types.ImageReference, error) {
var repo = ""
var image = ""
@@ -110,7 +115,7 @@ func NewReference(image string, repo string) (types.ImageReference, error) {
// This is necessary to prevent directory paths returned by PolicyConfigurationNamespaces
// from being ambiguous with values of PolicyConfigurationIdentity.
if strings.Contains(resolved, ":") {
- return nil, errors.Errorf("Invalid OSTreeCI reference %s@%s: path %s contains a colon", image, repo, resolved)
+ return nil, errors.Errorf("Invalid OSTree reference %s@%s: path %s contains a colon", image, repo, resolved)
}
return ostreeReference{
@@ -168,18 +173,38 @@ func (ref ostreeReference) PolicyConfigurationNamespaces() []string {
return res
}
-// NewImage returns a types.Image for this reference, possibly specialized for this ImageTransport.
-// The caller must call .Close() on the returned Image.
+func (s *ostreeImageCloser) Size() (int64, error) {
+ return s.size, nil
+}
+
+// NewImage returns a types.ImageCloser for this reference, possibly specialized for this ImageTransport.
+// The caller must call .Close() on the returned ImageCloser.
// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource,
// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage.
-func (ref ostreeReference) NewImage(ctx *types.SystemContext) (types.Image, error) {
- return nil, errors.New("Reading ostree: images is currently not supported")
+func (ref ostreeReference) NewImage(ctx *types.SystemContext) (types.ImageCloser, error) {
+ var tmpDir string
+ if ctx == nil || ctx.OSTreeTmpDirPath == "" {
+ tmpDir = os.TempDir()
+ } else {
+ tmpDir = ctx.OSTreeTmpDirPath
+ }
+ src, err := newImageSource(ctx, tmpDir, ref)
+ if err != nil {
+ return nil, err
+ }
+ return image.FromSource(ctx, src)
}
// NewImageSource returns a types.ImageSource for this reference.
// The caller must call .Close() on the returned ImageSource.
func (ref ostreeReference) NewImageSource(ctx *types.SystemContext) (types.ImageSource, error) {
- return nil, errors.New("Reading ostree: images is currently not supported")
+ var tmpDir string
+ if ctx == nil || ctx.OSTreeTmpDirPath == "" {
+ tmpDir = os.TempDir()
+ } else {
+ tmpDir = ctx.OSTreeTmpDirPath
+ }
+ return newImageSource(ctx, tmpDir, ref)
}
// NewImageDestination returns a types.ImageDestination for this reference.
diff --git a/vendor/github.com/containers/image/signature/policy_config.go b/vendor/github.com/containers/image/signature/policy_config.go
index bc6c5e9a7..42cc12ab1 100644
--- a/vendor/github.com/containers/image/signature/policy_config.go
+++ b/vendor/github.com/containers/image/signature/policy_config.go
@@ -70,7 +70,11 @@ func NewPolicyFromFile(fileName string) (*Policy, error) {
if err != nil {
return nil, err
}
- return NewPolicyFromBytes(contents)
+ policy, err := NewPolicyFromBytes(contents)
+ if err != nil {
+ return nil, errors.Wrapf(err, "invalid policy in %q", fileName)
+ }
+ return policy, nil
}
// NewPolicyFromBytes returns a policy parsed from the specified blob.
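
The change to NewPolicyFromFile only adds the offending file name to parse errors; behaviour is otherwise unchanged. A minimal caller-side sketch, where the policy path is the conventional default and is purely illustrative:

package main

import (
	"log"

	"github.com/containers/image/signature"
)

func main() {
	policy, err := signature.NewPolicyFromFile("/etc/containers/policy.json")
	if err != nil {
		// With this change the error now identifies the file, e.g.
		//   invalid policy in "/etc/containers/policy.json": ...
		log.Fatal(err)
	}
	policyContext, err := signature.NewPolicyContext(policy)
	if err != nil {
		log.Fatal(err)
	}
	defer policyContext.Destroy()
	log.Println("policy loaded")
}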
diff --git a/vendor/github.com/containers/image/storage/storage_image.go b/vendor/github.com/containers/image/storage/storage_image.go
index 46d1057de..89c0264b9 100644
--- a/vendor/github.com/containers/image/storage/storage_image.go
+++ b/vendor/github.com/containers/image/storage/storage_image.go
@@ -6,13 +6,11 @@ import (
"bytes"
"context"
"encoding/json"
- "fmt"
"io"
"io/ioutil"
- "os"
- "path/filepath"
- "strings"
- "sync/atomic"
+ "time"
+
+ "github.com/pkg/errors"
"github.com/containers/image/image"
"github.com/containers/image/manifest"
@@ -20,15 +18,10 @@ import (
"github.com/containers/storage"
"github.com/containers/storage/pkg/archive"
"github.com/containers/storage/pkg/ioutils"
- "github.com/docker/docker/api/types/versions"
- digest "github.com/opencontainers/go-digest"
- imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
- "github.com/pkg/errors"
+ ddigest "github.com/opencontainers/go-digest"
"github.com/sirupsen/logrus"
)
-const temporaryDirectoryForBigFiles = "/var/tmp" // Do not use the system default of os.TempDir(), usually /tmp, because with systemd it could be a tmpfs.
-
var (
// ErrBlobDigestMismatch is returned when PutBlob() is given a blob
// with a digest-based name that doesn't match its contents.
@@ -36,7 +29,8 @@ var (
// ErrBlobSizeMismatch is returned when PutBlob() is given a blob
// with an expected size that doesn't match the reader.
ErrBlobSizeMismatch = errors.New("blob size mismatch")
- // ErrNoManifestLists is returned when GetTargetManifest() is called.
+ // ErrNoManifestLists is returned when GetManifest() is called
+ // with a non-nil instanceDigest.
ErrNoManifestLists = errors.New("manifest lists are not supported by this transport")
// ErrNoSuchImage is returned when we attempt to access an image which
// doesn't exist in the storage area.
@@ -45,43 +39,52 @@ var (
type storageImageSource struct {
imageRef storageReference
- ID string
- layerPosition map[digest.Digest]int // Where we are in reading a blob's layers
- cachedManifest []byte // A cached copy of the manifest, if already known, or nil
- SignatureSizes []int `json:"signature-sizes,omitempty"` // List of sizes of each signature slice
+ Tag string `json:"tag,omitempty"`
+ Created time.Time `json:"created-time,omitempty"`
+ ID string `json:"id"`
+ BlobList []types.BlobInfo `json:"blob-list,omitempty"` // Ordered list of every blob the image has been told to handle
+ Layers map[ddigest.Digest][]string `json:"layers,omitempty"` // Map from digests of blobs to lists of layer IDs
+ LayerPosition map[ddigest.Digest]int `json:"-"` // Where we are in reading a blob's layers
+ SignatureSizes []int `json:"signature-sizes"` // List of sizes of each signature slice
}
type storageImageDestination struct {
- imageRef storageReference // The reference we'll use to name the image
- publicRef storageReference // The reference we return when asked about the name we'll give to the image
- directory string // Temporary directory where we store blobs until Commit() time
- nextTempFileID int32 // A counter that we use for computing filenames to assign to blobs
- manifest []byte // Manifest contents, temporary
- signatures []byte // Signature contents, temporary
- blobDiffIDs map[digest.Digest]digest.Digest // Mapping from layer blobsums to their corresponding DiffIDs
- fileSizes map[digest.Digest]int64 // Mapping from layer blobsums to their sizes
- filenames map[digest.Digest]string // Mapping from layer blobsums to names of files we used to hold them
- SignatureSizes []int `json:"signature-sizes,omitempty"` // List of sizes of each signature slice
+ imageRef storageReference
+ Tag string `json:"tag,omitempty"`
+ Created time.Time `json:"created-time,omitempty"`
+ ID string `json:"id"`
+ BlobList []types.BlobInfo `json:"blob-list,omitempty"` // Ordered list of every blob the image has been told to handle
+ Layers map[ddigest.Digest][]string `json:"layers,omitempty"` // Map from digests of blobs to lists of layer IDs
+ BlobData map[ddigest.Digest][]byte `json:"-"` // Map from names of blobs that aren't layers to contents, temporary
+ Manifest []byte `json:"-"` // Manifest contents, temporary
+ Signatures []byte `json:"-"` // Signature contents, temporary
+ SignatureSizes []int `json:"signature-sizes"` // List of sizes of each signature slice
+}
+
+type storageLayerMetadata struct {
+ Digest string `json:"digest,omitempty"`
+ Size int64 `json:"size"`
+ CompressedSize int64 `json:"compressed-size,omitempty"`
}
-type storageImage struct {
- types.Image
+type storageImageCloser struct {
+ types.ImageCloser
size int64
}
-// newImageSource sets up an image for reading.
+// newImageSource sets us up to read out an image, which needs to already exist.
func newImageSource(imageRef storageReference) (*storageImageSource, error) {
- // First, locate the image.
img, err := imageRef.resolveImage()
if err != nil {
return nil, err
}
-
- // Build the reader object.
image := &storageImageSource{
imageRef: imageRef,
+ Created: time.Now(),
ID: img.ID,
- layerPosition: make(map[digest.Digest]int),
+ BlobList: []types.BlobInfo{},
+ Layers: make(map[ddigest.Digest][]string),
+ LayerPosition: make(map[ddigest.Digest]int),
SignatureSizes: []int{},
}
if err := json.Unmarshal([]byte(img.Metadata), image); err != nil {
@@ -90,266 +93,202 @@ func newImageSource(imageRef storageReference) (*storageImageSource, error) {
return image, nil
}
-// Reference returns the image reference that we used to find this image.
-func (s storageImageSource) Reference() types.ImageReference {
- return s.imageRef
-}
-
-// Close cleans up any resources we tied up while reading the image.
-func (s storageImageSource) Close() error {
- return nil
-}
-
-// GetBlob reads the data blob or filesystem layer which matches the digest and size, if given.
-func (s *storageImageSource) GetBlob(info types.BlobInfo) (rc io.ReadCloser, n int64, err error) {
- rc, n, _, err = s.getBlobAndLayerID(info)
- return rc, n, err
-}
-
-// getBlobAndLayer reads the data blob or filesystem layer which matches the digest and size, if given.
-func (s *storageImageSource) getBlobAndLayerID(info types.BlobInfo) (rc io.ReadCloser, n int64, layerID string, err error) {
- var layer storage.Layer
- var diffOptions *storage.DiffOptions
- // We need a valid digest value.
- err = info.Digest.Validate()
- if err != nil {
- return nil, -1, "", err
- }
- // Check if the blob corresponds to a diff that was used to initialize any layers. Our
- // callers should try to retrieve layers using their uncompressed digests, so no need to
- // check if they're using one of the compressed digests, which we can't reproduce anyway.
- layers, err := s.imageRef.transport.store.LayersByUncompressedDigest(info.Digest)
- // If it's not a layer, then it must be a data item.
- if len(layers) == 0 {
- b, err := s.imageRef.transport.store.ImageBigData(s.ID, info.Digest.String())
- if err != nil {
- return nil, -1, "", err
- }
- r := bytes.NewReader(b)
- logrus.Debugf("exporting opaque data as blob %q", info.Digest.String())
- return ioutil.NopCloser(r), int64(r.Len()), "", nil
- }
- // Step through the list of matching layers. Tests may want to verify that if we have multiple layers
- // which claim to have the same contents, that we actually do have multiple layers, otherwise we could
- // just go ahead and use the first one every time.
- i := s.layerPosition[info.Digest]
- s.layerPosition[info.Digest] = i + 1
- if len(layers) > 0 {
- layer = layers[i%len(layers)]
- }
- // Force the storage layer to not try to match any compression that was used when the layer was first
- // handed to it.
- noCompression := archive.Uncompressed
- diffOptions = &storage.DiffOptions{
- Compression: &noCompression,
- }
- if layer.UncompressedSize < 0 {
- n = -1
- } else {
- n = layer.UncompressedSize
- }
- logrus.Debugf("exporting filesystem layer %q without compression for blob %q", layer.ID, info.Digest)
- rc, err = s.imageRef.transport.store.Diff("", layer.ID, diffOptions)
- if err != nil {
- return nil, -1, "", err
- }
- return rc, n, layer.ID, err
-}
-
-// GetManifest() reads the image's manifest.
-func (s *storageImageSource) GetManifest() (manifestBlob []byte, MIMEType string, err error) {
- if len(s.cachedManifest) == 0 {
- cachedBlob, err := s.imageRef.transport.store.ImageBigData(s.ID, "manifest")
- if err != nil {
- return nil, "", err
- }
- s.cachedManifest = cachedBlob
- }
- return s.cachedManifest, manifest.GuessMIMEType(s.cachedManifest), err
-}
-
-// UpdatedLayerInfos() returns the list of layer blobs that make up the root filesystem of
-// the image, after they've been decompressed.
-func (s *storageImageSource) UpdatedLayerInfos() []types.BlobInfo {
- simg, err := s.imageRef.transport.store.Image(s.ID)
- if err != nil {
- logrus.Errorf("error reading image %q: %v", s.ID, err)
- return nil
- }
- updatedBlobInfos := []types.BlobInfo{}
- layerID := simg.TopLayer
- _, manifestType, err := s.GetManifest()
- if err != nil {
- logrus.Errorf("error reading image manifest for %q: %v", s.ID, err)
- return nil
- }
- uncompressedLayerType := ""
- switch manifestType {
- case imgspecv1.MediaTypeImageManifest:
- uncompressedLayerType = imgspecv1.MediaTypeImageLayer
- case manifest.DockerV2Schema1MediaType, manifest.DockerV2Schema1SignedMediaType, manifest.DockerV2Schema2MediaType:
- // This is actually a compressed type, but there's no uncompressed type defined
- uncompressedLayerType = manifest.DockerV2Schema2LayerMediaType
- }
- for layerID != "" {
- layer, err := s.imageRef.transport.store.Layer(layerID)
- if err != nil {
- logrus.Errorf("error reading layer %q in image %q: %v", layerID, s.ID, err)
- return nil
- }
- if layer.UncompressedDigest == "" {
- logrus.Errorf("uncompressed digest for layer %q is unknown", layerID)
- return nil
- }
- if layer.UncompressedSize < 0 {
- logrus.Errorf("uncompressed size for layer %q is unknown", layerID)
- return nil
- }
- blobInfo := types.BlobInfo{
- Digest: layer.UncompressedDigest,
- Size: layer.UncompressedSize,
- MediaType: uncompressedLayerType,
- }
- updatedBlobInfos = append([]types.BlobInfo{blobInfo}, updatedBlobInfos...)
- layerID = layer.Parent
- }
- return updatedBlobInfos
-}
-
-// GetTargetManifest() is not supported.
-func (s *storageImageSource) GetTargetManifest(d digest.Digest) (manifestBlob []byte, MIMEType string, err error) {
- return nil, "", ErrNoManifestLists
-}
-
-// GetSignatures() parses the image's signatures blob into a slice of byte slices.
-func (s *storageImageSource) GetSignatures(ctx context.Context) (signatures [][]byte, err error) {
- var offset int
- sigslice := [][]byte{}
- signature := []byte{}
- if len(s.SignatureSizes) > 0 {
- signatureBlob, err := s.imageRef.transport.store.ImageBigData(s.ID, "signatures")
- if err != nil {
- return nil, errors.Wrapf(err, "error looking up signatures data for image %q", s.ID)
- }
- signature = signatureBlob
- }
- for _, length := range s.SignatureSizes {
- sigslice = append(sigslice, signature[offset:offset+length])
- offset += length
- }
- if offset != len(signature) {
- return nil, errors.Errorf("signatures data contained %d extra bytes", len(signatures)-offset)
- }
- return sigslice, nil
-}
-
-// newImageDestination sets us up to write a new image, caching blobs in a temporary directory until
-// it's time to Commit() the image
+// newImageDestination sets us up to write a new image.
func newImageDestination(imageRef storageReference) (*storageImageDestination, error) {
- directory, err := ioutil.TempDir(temporaryDirectoryForBigFiles, "storage")
- if err != nil {
- return nil, errors.Wrapf(err, "error creating a temporary directory")
- }
- // Break reading of the reference we're writing, so that copy.Image() won't try to rewrite
- // schema1 image manifests to remove embedded references, since that changes the manifest's
- // digest, and that makes the image unusable if we subsequently try to access it using a
- // reference that mentions the no-longer-correct digest.
- publicRef := imageRef
- publicRef.name = nil
image := &storageImageDestination{
imageRef: imageRef,
- publicRef: publicRef,
- directory: directory,
- blobDiffIDs: make(map[digest.Digest]digest.Digest),
- fileSizes: make(map[digest.Digest]int64),
- filenames: make(map[digest.Digest]string),
+ Tag: imageRef.reference,
+ Created: time.Now(),
+ ID: imageRef.id,
+ BlobList: []types.BlobInfo{},
+ Layers: make(map[ddigest.Digest][]string),
+ BlobData: make(map[ddigest.Digest][]byte),
SignatureSizes: []int{},
}
return image, nil
}
-// Reference returns a mostly-usable image reference that can't return a DockerReference, to
-// avoid triggering logic in copy.Image() that rewrites schema 1 image manifests in order to
-// remove image names that they contain which don't match the value we're using.
+func (s storageImageSource) Reference() types.ImageReference {
+ return s.imageRef
+}
+
func (s storageImageDestination) Reference() types.ImageReference {
- return s.publicRef
+ return s.imageRef
}
-// Close cleans up the temporary directory.
-func (s *storageImageDestination) Close() error {
- return os.RemoveAll(s.directory)
+func (s storageImageSource) Close() error {
+ return nil
+}
+
+func (s storageImageDestination) Close() error {
+ return nil
}
-// ShouldCompressLayers indicates whether or not a caller should compress not-already-compressed
-// data when handing it to us.
func (s storageImageDestination) ShouldCompressLayers() bool {
- // We ultimately have to decompress layers to populate trees on disk, so callers shouldn't
- // bother compressing them before handing them to us, if they're not already compressed.
+ // We ultimately have to decompress layers to populate trees on disk,
+ // so callers shouldn't bother compressing them before handing them to
+ // us, if they're not already compressed.
return false
}
-// PutBlob stores a layer or data blob in our temporary directory, checking that any information
-// in the blobinfo matches the incoming data.
-func (s *storageImageDestination) PutBlob(stream io.Reader, blobinfo types.BlobInfo) (types.BlobInfo, error) {
+// putBlob stores a layer or data blob, optionally enforcing that a digest in
+// blobinfo matches the incoming data.
+func (s *storageImageDestination) putBlob(stream io.Reader, blobinfo types.BlobInfo, enforceDigestAndSize bool) (types.BlobInfo, error) {
+ blobSize := blobinfo.Size
+ digest := blobinfo.Digest
errorBlobInfo := types.BlobInfo{
Digest: "",
Size: -1,
}
- // Set up to digest the blob and count its size while saving it to a file.
- hasher := digest.Canonical.Digester()
- if blobinfo.Digest.Validate() == nil {
- if a := blobinfo.Digest.Algorithm(); a.Available() {
+ // Try to read an initial snippet of the blob.
+ buf := [archive.HeaderSize]byte{}
+ n, err := io.ReadAtLeast(stream, buf[:], len(buf))
+ if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF {
+ return errorBlobInfo, err
+ }
+ // Set up to read the whole blob (the initial snippet, plus the rest)
+ // while digesting it with either the default, or the passed-in digest,
+ // if one was specified.
+ hasher := ddigest.Canonical.Digester()
+ if digest.Validate() == nil {
+ if a := digest.Algorithm(); a.Available() {
hasher = a.Digester()
}
}
- diffID := digest.Canonical.Digester()
- filename := filepath.Join(s.directory, fmt.Sprintf("%d", atomic.AddInt32(&s.nextTempFileID, 1)))
- file, err := os.OpenFile(filename, os.O_CREATE|os.O_TRUNC|os.O_WRONLY|os.O_EXCL, 0600)
- if err != nil {
- return errorBlobInfo, errors.Wrapf(err, "error creating temporary file %q", filename)
- }
- defer file.Close()
+ hash := ""
counter := ioutils.NewWriteCounter(hasher.Hash())
- reader := io.TeeReader(io.TeeReader(stream, counter), file)
- decompressed, err := archive.DecompressStream(reader)
- if err != nil {
- return errorBlobInfo, errors.Wrap(err, "error setting up to decompress blob")
- }
- // Copy the data to the file.
- _, err = io.Copy(diffID.Hash(), decompressed)
- decompressed.Close()
- if err != nil {
- return errorBlobInfo, errors.Wrapf(err, "error storing blob to file %q", filename)
- }
- // Ensure that any information that we were given about the blob is correct.
- if blobinfo.Digest.Validate() == nil && blobinfo.Digest != hasher.Digest() {
- return errorBlobInfo, ErrBlobDigestMismatch
- }
- if blobinfo.Size >= 0 && blobinfo.Size != counter.Count {
- return errorBlobInfo, ErrBlobSizeMismatch
- }
- // Record information about the blob.
- s.blobDiffIDs[hasher.Digest()] = diffID.Digest()
- s.fileSizes[hasher.Digest()] = counter.Count
- s.filenames[hasher.Digest()] = filename
- blobDigest := blobinfo.Digest
- if blobDigest.Validate() != nil {
- blobDigest = hasher.Digest()
- }
- blobSize := blobinfo.Size
- if blobSize < 0 {
- blobSize = counter.Count
+ defragmented := io.MultiReader(bytes.NewBuffer(buf[:n]), stream)
+ multi := io.TeeReader(defragmented, counter)
+ if (n > 0) && archive.IsArchive(buf[:n]) {
+ // It's a filesystem layer. If it's not the first one in the
+ // image, we assume that the most recently added layer is its
+ // parent.
+ parentLayer := ""
+ for _, blob := range s.BlobList {
+ if layerList, ok := s.Layers[blob.Digest]; ok {
+ parentLayer = layerList[len(layerList)-1]
+ }
+ }
+ // If we have an expected content digest, generate a layer ID
+ // based on the parent's ID and the expected content digest.
+ id := ""
+ if digest.Validate() == nil {
+ id = ddigest.Canonical.FromBytes([]byte(parentLayer + "+" + digest.String())).Hex()
+ }
+ // Attempt to create the identified layer and import its contents.
+ layer, uncompressedSize, err := s.imageRef.transport.store.PutLayer(id, parentLayer, nil, "", true, multi)
+ if err != nil && errors.Cause(err) != storage.ErrDuplicateID {
+ logrus.Debugf("error importing layer blob %q as %q: %v", blobinfo.Digest, id, err)
+ return errorBlobInfo, err
+ }
+ if errors.Cause(err) == storage.ErrDuplicateID {
+ // We specified an ID, and there's already a layer with
+ // the same ID. Drain the input so that we can look at
+ // its length and digest.
+ _, err := io.Copy(ioutil.Discard, multi)
+ if err != nil && err != io.EOF {
+ logrus.Debugf("error digesting layer blob %q: %v", blobinfo.Digest, id, err)
+ return errorBlobInfo, err
+ }
+ hash = hasher.Digest().String()
+ } else {
+ // Applied the layer with the specified ID. Note the
+ // size info and computed digest.
+ hash = hasher.Digest().String()
+ layerMeta := storageLayerMetadata{
+ Digest: hash,
+ CompressedSize: counter.Count,
+ Size: uncompressedSize,
+ }
+ if metadata, err := json.Marshal(&layerMeta); len(metadata) != 0 && err == nil {
+ s.imageRef.transport.store.SetMetadata(layer.ID, string(metadata))
+ }
+ // Hang on to the new layer's ID.
+ id = layer.ID
+ }
+ // Check if the size looks right.
+ if enforceDigestAndSize && blobinfo.Size >= 0 && blobinfo.Size != counter.Count {
+ logrus.Debugf("layer blob %q size is %d, not %d, rejecting", blobinfo.Digest, counter.Count, blobinfo.Size)
+ if layer != nil {
+ // Something's wrong; delete the newly-created layer.
+ s.imageRef.transport.store.DeleteLayer(layer.ID)
+ }
+ return errorBlobInfo, ErrBlobSizeMismatch
+ }
+ // If the content digest was specified, verify it.
+ if enforceDigestAndSize && digest.Validate() == nil && digest.String() != hash {
+ logrus.Debugf("layer blob %q digests to %q, rejecting", blobinfo.Digest, hash)
+ if layer != nil {
+ // Something's wrong; delete the newly-created layer.
+ s.imageRef.transport.store.DeleteLayer(layer.ID)
+ }
+ return errorBlobInfo, ErrBlobDigestMismatch
+ }
+ // If we didn't get a blob size, return the one we calculated.
+ if blobSize == -1 {
+ blobSize = counter.Count
+ }
+ // If we didn't get a digest, construct one.
+ if digest == "" {
+ digest = ddigest.Digest(hash)
+ }
+ // Record that this layer blob is a layer, and the layer ID it
+ // ended up having. This is a list, in case the same blob is
+ // being applied more than once.
+ s.Layers[digest] = append(s.Layers[digest], id)
+ s.BlobList = append(s.BlobList, types.BlobInfo{Digest: digest, Size: counter.Count})
+ if layer != nil {
+ logrus.Debugf("blob %q imported as a filesystem layer %q", blobinfo.Digest, id)
+ } else {
+ logrus.Debugf("layer blob %q already present as layer %q", blobinfo.Digest, id)
+ }
+ } else {
+ // It's just data. Finish scanning it in, check that our
+ // computed digest matches the passed-in digest, and store it,
+ // but leave it out of the blob-to-layer-ID map so that we can
+ // tell that it's not a layer.
+ blob, err := ioutil.ReadAll(multi)
+ if err != nil && err != io.EOF {
+ return errorBlobInfo, err
+ }
+ hash = hasher.Digest().String()
+ if enforceDigestAndSize && blobinfo.Size >= 0 && int64(len(blob)) != blobinfo.Size {
+ logrus.Debugf("blob %q size is %d, not %d, rejecting", blobinfo.Digest, int64(len(blob)), blobinfo.Size)
+ return errorBlobInfo, ErrBlobSizeMismatch
+ }
+ // If we were given a digest, verify that the content matches
+ // it.
+ if enforceDigestAndSize && digest.Validate() == nil && digest.String() != hash {
+ logrus.Debugf("blob %q digests to %q, rejecting", blobinfo.Digest, hash)
+ return errorBlobInfo, ErrBlobDigestMismatch
+ }
+ // If we didn't get a blob size, return the one we calculated.
+ if blobSize == -1 {
+ blobSize = int64(len(blob))
+ }
+ // If we didn't get a digest, construct one.
+ if digest == "" {
+ digest = ddigest.Digest(hash)
+ }
+ // Save the blob for when we Commit().
+ s.BlobData[digest] = blob
+ s.BlobList = append(s.BlobList, types.BlobInfo{Digest: digest, Size: int64(len(blob))})
+ logrus.Debugf("blob %q imported as opaque data %q", blobinfo.Digest, digest)
}
return types.BlobInfo{
- Digest: blobDigest,
- Size: blobSize,
- MediaType: blobinfo.MediaType,
+ Digest: digest,
+ Size: blobSize,
}, nil
}
-// HasBlob returns true iff the image destination already contains a blob with the matching digest which can be
-// reapplied using ReapplyBlob.
-//
+// PutBlob is used to both store filesystem layers and binary data that is part
+// of the image. Filesystem layers are assumed to be imported in order, as
+// that is required by some of the underlying storage drivers.
+func (s *storageImageDestination) PutBlob(stream io.Reader, blobinfo types.BlobInfo) (types.BlobInfo, error) {
+ return s.putBlob(stream, blobinfo, true)
+}
+
+// HasBlob returns true iff the image destination already contains a blob with the matching digest which can be reapplied using ReapplyBlob.
// Unlike PutBlob, the digest can not be empty. If HasBlob returns true, the size of the blob must also be returned.
// If the destination does not contain the blob, or it is unknown, HasBlob ordinarily returns (false, -1, nil);
// it returns a non-nil error only on an unexpected failure.
@@ -357,373 +296,93 @@ func (s *storageImageDestination) HasBlob(blobinfo types.BlobInfo) (bool, int64,
if blobinfo.Digest == "" {
return false, -1, errors.Errorf(`Can not check for a blob with unknown digest`)
}
- if err := blobinfo.Digest.Validate(); err != nil {
- return false, -1, errors.Wrapf(err, `Can not check for a blob with invalid digest`)
- }
- // Check if we've already cached it in a file.
- if size, ok := s.fileSizes[blobinfo.Digest]; ok {
- return true, size, nil
- }
- // Check if we have a wasn't-compressed layer in storage that's based on that blob.
- layers, err := s.imageRef.transport.store.LayersByUncompressedDigest(blobinfo.Digest)
- if err != nil && errors.Cause(err) != storage.ErrLayerUnknown {
- return false, -1, errors.Wrapf(err, `Error looking for layers with digest %q`, blobinfo.Digest)
- }
- if len(layers) > 0 {
- // Save this for completeness.
- s.blobDiffIDs[blobinfo.Digest] = layers[0].UncompressedDigest
- return true, layers[0].UncompressedSize, nil
- }
- // Check if we have a was-compressed layer in storage that's based on that blob.
- layers, err = s.imageRef.transport.store.LayersByCompressedDigest(blobinfo.Digest)
- if err != nil && errors.Cause(err) != storage.ErrLayerUnknown {
- return false, -1, errors.Wrapf(err, `Error looking for compressed layers with digest %q`, blobinfo.Digest)
- }
- if len(layers) > 0 {
- // Record the uncompressed value so that we can use it to calculate layer IDs.
- s.blobDiffIDs[blobinfo.Digest] = layers[0].UncompressedDigest
- return true, layers[0].CompressedSize, nil
+ for _, blob := range s.BlobList {
+ if blob.Digest == blobinfo.Digest {
+ return true, blob.Size, nil
+ }
}
- // Nope, we don't have it.
return false, -1, nil
}
-// ReapplyBlob is now a no-op, assuming HasBlob() says we already have it, since Commit() can just apply the
-// same one when it walks the list in the manifest.
func (s *storageImageDestination) ReapplyBlob(blobinfo types.BlobInfo) (types.BlobInfo, error) {
- present, size, err := s.HasBlob(blobinfo)
- if !present {
- return types.BlobInfo{}, errors.Errorf("error reapplying blob %+v: blob was not previously applied", blobinfo)
- }
+ err := blobinfo.Digest.Validate()
if err != nil {
- return types.BlobInfo{}, errors.Wrapf(err, "error reapplying blob %+v", blobinfo)
+ return types.BlobInfo{}, err
}
- blobinfo.Size = size
- return blobinfo, nil
-}
-
-// computeID computes a recommended image ID based on information we have so far.
-func (s *storageImageDestination) computeID(m manifest.Manifest) string {
- mb, err := m.Serialize()
- if err != nil {
- return ""
- }
- switch manifest.GuessMIMEType(mb) {
- case manifest.DockerV2Schema2MediaType, imgspecv1.MediaTypeImageManifest:
- // For Schema2 and OCI1(?), the ID is just the hex part of the digest of the config blob.
- logrus.Debugf("trivial image ID for configured blob")
- configInfo := m.ConfigInfo()
- if configInfo.Digest.Validate() == nil {
- return configInfo.Digest.Hex()
- }
- return ""
- case manifest.DockerV2Schema1MediaType, manifest.DockerV2Schema1SignedMediaType:
- // Convert the schema 1 compat info into a schema 2 config, constructing some of the fields
- // that aren't directly comparable using info from the manifest.
- logrus.Debugf("computing image ID using compat data")
- s1, ok := m.(*manifest.Schema1)
- if !ok {
- logrus.Debugf("schema type was guessed wrong?")
- return ""
- }
- if len(s1.History) == 0 {
- logrus.Debugf("image has no layers")
- return ""
- }
- s2 := struct {
- manifest.Schema2Image
- ID string `json:"id,omitempty"`
- Parent string `json:"parent,omitempty"`
- ParentID string `json:"parent_id,omitempty"`
- LayerID string `json:"layer_id,omitempty"`
- ThrowAway bool `json:"throwaway,omitempty"`
- Size int64 `json:",omitempty"`
- }{}
- config := []byte(s1.History[0].V1Compatibility)
- if json.Unmarshal(config, &s2) != nil {
- logrus.Debugf("error decoding configuration")
- return ""
- }
- // Images created with versions prior to 1.8.3 require us to rebuild the object.
- if s2.DockerVersion != "" && versions.LessThan(s2.DockerVersion, "1.8.3") {
- err = json.Unmarshal(config, &s2)
- if err != nil {
- logrus.Infof("error decoding compat image config %s: %v", string(config), err)
- return ""
- }
- config, err = json.Marshal(&s2)
- if err != nil {
- logrus.Infof("error re-encoding compat image config %#v: %v", s2, err)
- return ""
- }
- }
- // Build the history.
- for _, h := range s1.History {
- compat := manifest.Schema1V1Compatibility{}
- if json.Unmarshal([]byte(h.V1Compatibility), &compat) != nil {
- logrus.Debugf("error decoding history information")
- return ""
- }
- hitem := manifest.Schema2History{
- Created: compat.Created,
- CreatedBy: strings.Join(compat.ContainerConfig.Cmd, " "),
- Comment: compat.Comment,
- EmptyLayer: compat.ThrowAway,
- }
- s2.History = append([]manifest.Schema2History{hitem}, s2.History...)
- }
- // Build the rootfs information. We need the decompressed sums that we've been
- // calculating to fill in the DiffIDs.
- s2.RootFS = &manifest.Schema2RootFS{
- Type: "layers",
- }
- for _, fslayer := range s1.FSLayers {
- blobSum := fslayer.BlobSum
- diffID, ok := s.blobDiffIDs[blobSum]
- if !ok {
- logrus.Infof("error looking up diffID for blob %q", string(blobSum))
- return ""
- }
- s2.RootFS.DiffIDs = append([]digest.Digest{diffID}, s2.RootFS.DiffIDs...)
- }
- // And now for some raw manipulation.
- raw := make(map[string]*json.RawMessage)
- err = json.Unmarshal(config, &raw)
- if err != nil {
- logrus.Infof("error re-decoding compat image config %#v: %v", s2, err)
- return ""
- }
- // Drop some fields.
- delete(raw, "id")
- delete(raw, "parent")
- delete(raw, "parent_id")
- delete(raw, "layer_id")
- delete(raw, "throwaway")
- delete(raw, "Size")
- // Add the history and rootfs information.
- rootfs, err := json.Marshal(s2.RootFS)
- if err != nil {
- logrus.Infof("error encoding rootfs information %#v: %v", s2.RootFS, err)
- return ""
- }
- rawRootfs := json.RawMessage(rootfs)
- raw["rootfs"] = &rawRootfs
- history, err := json.Marshal(s2.History)
- if err != nil {
- logrus.Infof("error encoding history information %#v: %v", s2.History, err)
- return ""
- }
- rawHistory := json.RawMessage(history)
- raw["history"] = &rawHistory
- // Encode the result, and take the digest of that result.
- config, err = json.Marshal(raw)
+ if layerList, ok := s.Layers[blobinfo.Digest]; !ok || len(layerList) < 1 {
+ b, err := s.imageRef.transport.store.ImageBigData(s.ID, blobinfo.Digest.String())
if err != nil {
- logrus.Infof("error re-encoding compat image config %#v: %v", s2, err)
- return ""
+ return types.BlobInfo{}, err
}
- return digest.FromBytes(config).Hex()
- case manifest.DockerV2ListMediaType:
- logrus.Debugf("no image ID for manifest list")
- // FIXME
- case imgspecv1.MediaTypeImageIndex:
- logrus.Debugf("no image ID for manifest index")
- // FIXME
- default:
- logrus.Debugf("no image ID for unrecognized manifest type %q", manifest.GuessMIMEType(mb))
- // FIXME
- }
- return ""
-}
-
-// getConfigBlob exists only to let us retrieve the configuration blob so that the manifest package can dig
-// information out of it for Inspect().
-func (s *storageImageDestination) getConfigBlob(info types.BlobInfo) ([]byte, error) {
- if info.Digest == "" {
- return nil, errors.Errorf(`no digest supplied when reading blob`)
- }
- if err := info.Digest.Validate(); err != nil {
- return nil, errors.Wrapf(err, `invalid digest supplied when reading blob`)
+ return types.BlobInfo{Digest: blobinfo.Digest, Size: int64(len(b))}, nil
}
- // Assume it's a file, since we're only calling this from a place that expects to read files.
- if filename, ok := s.filenames[info.Digest]; ok {
- contents, err2 := ioutil.ReadFile(filename)
- if err2 != nil {
- return nil, errors.Wrapf(err2, `error reading blob from file %q`, filename)
- }
- return contents, nil
+ layerList := s.Layers[blobinfo.Digest]
+ rc, _, err := diffLayer(s.imageRef.transport.store, layerList[len(layerList)-1])
+ if err != nil {
+ return types.BlobInfo{}, err
}
- // If it's not a file, it's a bug, because we're not expecting to be asked for a layer.
- return nil, errors.New("blob not found")
+ return s.putBlob(rc, blobinfo, false)
}
func (s *storageImageDestination) Commit() error {
- // Find the list of layer blobs.
- if len(s.manifest) == 0 {
- return errors.New("Internal error: storageImageDestination.Commit() called without PutManifest()")
- }
- man, err := manifest.FromBlob(s.manifest, manifest.GuessMIMEType(s.manifest))
- if err != nil {
- return errors.Wrapf(err, "error parsing manifest")
- }
- layerBlobs := man.LayerInfos()
- // Extract or find the layers.
+ // Create the image record.
lastLayer := ""
- addedLayers := []string{}
- for _, blob := range layerBlobs {
- var diff io.ReadCloser
- // Check if there's already a layer with the ID that we'd give to the result of applying
- // this layer blob to its parent, if it has one, or the blob's hex value otherwise.
- diffID, haveDiffID := s.blobDiffIDs[blob.Digest]
- if !haveDiffID {
- // Check if it's elsewhere and the caller just forgot to pass it to us in a PutBlob(),
- // or to even check if we had it.
- logrus.Debugf("looking for diffID for blob %+v", blob.Digest)
- has, _, err := s.HasBlob(blob)
- if err != nil {
- return errors.Wrapf(err, "error checking for a layer based on blob %q", blob.Digest.String())
- }
- if !has {
- return errors.Errorf("error determining uncompressed digest for blob %q", blob.Digest.String())
- }
- diffID, haveDiffID = s.blobDiffIDs[blob.Digest]
- if !haveDiffID {
- return errors.Errorf("we have blob %q, but don't know its uncompressed digest", blob.Digest.String())
- }
- }
- id := diffID.Hex()
- if lastLayer != "" {
- id = digest.Canonical.FromBytes([]byte(lastLayer + "+" + diffID.Hex())).Hex()
- }
- if layer, err2 := s.imageRef.transport.store.Layer(id); layer != nil && err2 == nil {
- // There's already a layer that should have the right contents, just reuse it.
- lastLayer = layer.ID
- continue
- }
- // Check if we cached a file with that blobsum. If we didn't already have a layer with
- // the blob's contents, we should have gotten a copy.
- if filename, ok := s.filenames[blob.Digest]; ok {
- // Use the file's contents to initialize the layer.
- file, err2 := os.Open(filename)
- if err2 != nil {
- return errors.Wrapf(err2, "error opening file %q", filename)
- }
- defer file.Close()
- diff = file
- }
- if diff == nil {
- // Try to find a layer with contents matching that blobsum.
- layer := ""
- layers, err2 := s.imageRef.transport.store.LayersByUncompressedDigest(blob.Digest)
- if err2 == nil && len(layers) > 0 {
- layer = layers[0].ID
- } else {
- layers, err2 = s.imageRef.transport.store.LayersByCompressedDigest(blob.Digest)
- if err2 == nil && len(layers) > 0 {
- layer = layers[0].ID
- }
- }
- if layer == "" {
- return errors.Wrapf(err2, "error locating layer for blob %q", blob.Digest)
- }
- // Use the layer's contents to initialize the new layer.
- noCompression := archive.Uncompressed
- diffOptions := &storage.DiffOptions{
- Compression: &noCompression,
- }
- diff, err2 = s.imageRef.transport.store.Diff("", layer, diffOptions)
- if err2 != nil {
- return errors.Wrapf(err2, "error reading layer %q for blob %q", layer, blob.Digest)
- }
- defer diff.Close()
- }
- if diff == nil {
- // This shouldn't have happened.
- return errors.Errorf("error applying blob %q: content not found", blob.Digest)
+ for _, blob := range s.BlobList {
+ if layerList, ok := s.Layers[blob.Digest]; ok {
+ lastLayer = layerList[len(layerList)-1]
}
- // Build the new layer using the diff, regardless of where it came from.
- layer, _, err := s.imageRef.transport.store.PutLayer(id, lastLayer, nil, "", false, diff)
- if err != nil {
- return errors.Wrapf(err, "error adding layer with blob %q", blob.Digest)
- }
- lastLayer = layer.ID
- addedLayers = append([]string{lastLayer}, addedLayers...)
- }
- // If one of those blobs was a configuration blob, then we can try to dig out the date when the image
- // was originally created, in case we're just copying it. If not, no harm done.
- var options *storage.ImageOptions
- if inspect, err := man.Inspect(s.getConfigBlob); err == nil {
- logrus.Debugf("setting image creation date to %s", inspect.Created)
- options = &storage.ImageOptions{
- CreationDate: inspect.Created,
- }
- }
- // Create the image record, pointing to the most-recently added layer.
- intendedID := s.imageRef.id
- if intendedID == "" {
- intendedID = s.computeID(man)
}
- oldNames := []string{}
- img, err := s.imageRef.transport.store.CreateImage(intendedID, nil, lastLayer, "", options)
+ img, err := s.imageRef.transport.store.CreateImage(s.ID, nil, lastLayer, "", nil)
if err != nil {
if errors.Cause(err) != storage.ErrDuplicateID {
logrus.Debugf("error creating image: %q", err)
- return errors.Wrapf(err, "error creating image %q", intendedID)
+ return errors.Wrapf(err, "error creating image %q", s.ID)
}
- img, err = s.imageRef.transport.store.Image(intendedID)
+ img, err = s.imageRef.transport.store.Image(s.ID)
if err != nil {
- return errors.Wrapf(err, "error reading image %q", intendedID)
+ return errors.Wrapf(err, "error reading image %q", s.ID)
}
if img.TopLayer != lastLayer {
- logrus.Debugf("error creating image: image with ID %q exists, but uses different layers", intendedID)
- return errors.Wrapf(storage.ErrDuplicateID, "image with ID %q already exists, but uses a different top layer", intendedID)
+ logrus.Debugf("error creating image: image with ID %q exists, but uses different layers", s.ID)
+ return errors.Wrapf(storage.ErrDuplicateID, "image with ID %q already exists, but uses a different top layer", s.ID)
}
logrus.Debugf("reusing image ID %q", img.ID)
- oldNames = append(oldNames, img.Names...)
} else {
logrus.Debugf("created new image ID %q", img.ID)
}
- // Add the non-layer blobs as data items. Since we only share layers, they should all be in files, so
- // we just need to screen out the ones that are actually layers to get the list of non-layers.
- dataBlobs := make(map[digest.Digest]struct{})
- for blob := range s.filenames {
- dataBlobs[blob] = struct{}{}
+ s.ID = img.ID
+ names := img.Names
+ if s.Tag != "" {
+ names = append(names, s.Tag)
}
- for _, layerBlob := range layerBlobs {
- delete(dataBlobs, layerBlob.Digest)
- }
- for blob := range dataBlobs {
- v, err := ioutil.ReadFile(s.filenames[blob])
- if err != nil {
- return errors.Wrapf(err, "error copying non-layer blob %q to image", blob)
- }
- if err := s.imageRef.transport.store.SetImageBigData(img.ID, blob.String(), v); err != nil {
+ // We have names to set, so move those names to this image.
+ if len(names) > 0 {
+ if err := s.imageRef.transport.store.SetNames(img.ID, names); err != nil {
if _, err2 := s.imageRef.transport.store.DeleteImage(img.ID, true); err2 != nil {
logrus.Debugf("error deleting incomplete image %q: %v", img.ID, err2)
}
- logrus.Debugf("error saving big data %q for image %q: %v", blob.String(), img.ID, err)
- return errors.Wrapf(err, "error saving big data %q for image %q", blob.String(), img.ID)
+ logrus.Debugf("error setting names on image %q: %v", img.ID, err)
+ return err
}
+ logrus.Debugf("set names of image %q to %v", img.ID, names)
}
- // Set the reference's name on the image.
- if name := s.imageRef.DockerReference(); len(oldNames) > 0 || name != nil {
- names := []string{}
- if name != nil {
- names = append(names, verboseName(name))
- }
- if len(oldNames) > 0 {
- names = append(names, oldNames...)
- }
- if err := s.imageRef.transport.store.SetNames(img.ID, names); err != nil {
+ // Save the data blobs to disk, and drop their contents from memory.
+ keys := []ddigest.Digest{}
+ for k, v := range s.BlobData {
+ if err := s.imageRef.transport.store.SetImageBigData(img.ID, k.String(), v); err != nil {
if _, err2 := s.imageRef.transport.store.DeleteImage(img.ID, true); err2 != nil {
logrus.Debugf("error deleting incomplete image %q: %v", img.ID, err2)
}
- logrus.Debugf("error setting names %v on image %q: %v", names, img.ID, err)
- return errors.Wrapf(err, "error setting names %v on image %q", names, img.ID)
+ logrus.Debugf("error saving big data %q for image %q: %v", k, img.ID, err)
+ return err
}
- logrus.Debugf("set names of image %q to %v", img.ID, names)
+ keys = append(keys, k)
+ }
+ for _, key := range keys {
+ delete(s.BlobData, key)
}
- // Save the manifest.
- if err := s.imageRef.transport.store.SetImageBigData(img.ID, "manifest", s.manifest); err != nil {
+ // Save the manifest, if we have one.
+ if err := s.imageRef.transport.store.SetImageBigData(s.ID, "manifest", s.Manifest); err != nil {
if _, err2 := s.imageRef.transport.store.DeleteImage(img.ID, true); err2 != nil {
logrus.Debugf("error deleting incomplete image %q: %v", img.ID, err2)
}
@@ -731,14 +390,12 @@ func (s *storageImageDestination) Commit() error {
return err
}
// Save the signatures, if we have any.
- if len(s.signatures) > 0 {
- if err := s.imageRef.transport.store.SetImageBigData(img.ID, "signatures", s.signatures); err != nil {
- if _, err2 := s.imageRef.transport.store.DeleteImage(img.ID, true); err2 != nil {
- logrus.Debugf("error deleting incomplete image %q: %v", img.ID, err2)
- }
- logrus.Debugf("error saving signatures for image %q: %v", img.ID, err)
- return err
+ if err := s.imageRef.transport.store.SetImageBigData(s.ID, "signatures", s.Signatures); err != nil {
+ if _, err2 := s.imageRef.transport.store.DeleteImage(img.ID, true); err2 != nil {
+ logrus.Debugf("error deleting incomplete image %q: %v", img.ID, err2)
}
+ logrus.Debugf("error saving signatures for image %q: %v", img.ID, err)
+ return err
}
// Save our metadata.
metadata, err := json.Marshal(s)
@@ -750,7 +407,7 @@ func (s *storageImageDestination) Commit() error {
return err
}
if len(metadata) != 0 {
- if err = s.imageRef.transport.store.SetMetadata(img.ID, string(metadata)); err != nil {
+ if err = s.imageRef.transport.store.SetMetadata(s.ID, string(metadata)); err != nil {
if _, err2 := s.imageRef.transport.store.DeleteImage(img.ID, true); err2 != nil {
logrus.Debugf("error deleting incomplete image %q: %v", img.ID, err2)
}
@@ -763,7 +420,7 @@ func (s *storageImageDestination) Commit() error {
}
var manifestMIMETypes = []string{
- imgspecv1.MediaTypeImageManifest,
+ // TODO(runcom): we'll add OCI as part of another PR here
manifest.DockerV2Schema2MediaType,
manifest.DockerV2Schema1SignedMediaType,
manifest.DockerV2Schema1MediaType,
@@ -773,20 +430,23 @@ func (s *storageImageDestination) SupportedManifestMIMETypes() []string {
return manifestMIMETypes
}
-// PutManifest writes the manifest to the destination.
+// PutManifest writes the manifest to the destination.
+// FIXME? This should also receive a MIME type if known, to differentiate between schema versions.
+// If the destination is in principle available but refuses this manifest type (e.g. it does not recognize the schema)
+// and may accept a different manifest type, the returned error must be a ManifestTypeRejectedError.
func (s *storageImageDestination) PutManifest(manifest []byte) error {
- s.manifest = make([]byte, len(manifest))
- copy(s.manifest, manifest)
+ s.Manifest = make([]byte, len(manifest))
+ copy(s.Manifest, manifest)
return nil
}
-// SupportsSignatures returns an error if we can't expect GetSignatures() to return data that was
-// previously supplied to PutSignatures().
+// SupportsSignatures returns an error if we can't expect GetSignatures() to
+// return data that was previously supplied to PutSignatures().
func (s *storageImageDestination) SupportsSignatures() error {
return nil
}
-// AcceptsForeignLayerURLs returns false iff foreign layers in the manifest should actually be
+// AcceptsForeignLayerURLs returns false iff foreign layers in the manifest should actually be
// uploaded to the image destination, true otherwise.
func (s *storageImageDestination) AcceptsForeignLayerURLs() bool {
return false
@@ -797,7 +457,6 @@ func (s *storageImageDestination) MustMatchRuntimeOS() bool {
return true
}
-// PutSignatures records the image's signatures for committing as a single data blob.
func (s *storageImageDestination) PutSignatures(signatures [][]byte) error {
sizes := []int{}
sigblob := []byte{}
@@ -808,62 +467,156 @@ func (s *storageImageDestination) PutSignatures(signatures [][]byte) error {
copy(newblob[len(sigblob):], sig)
sigblob = newblob
}
- s.signatures = sigblob
+ s.Signatures = sigblob
s.SignatureSizes = sizes
return nil
}
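PutSignatures above flattens the individual signatures into one blob while recording each length in SignatureSizes, so they can be split apart again later. A hedged, standalone sketch of that packing step, using plain byte slices rather than the destination struct:

package main

import "fmt"

// packSignatures concatenates the signatures into a single blob and records
// each signature's length, mirroring the bookkeeping done by PutSignatures.
func packSignatures(signatures [][]byte) (blob []byte, sizes []int) {
	for _, sig := range signatures {
		sizes = append(sizes, len(sig))
		blob = append(blob, sig...)
	}
	return blob, sizes
}

func main() {
	blob, sizes := packSignatures([][]byte{[]byte("sig-one"), []byte("sig-two")})
	fmt.Printf("%d bytes, sizes %v\n", len(blob), sizes) // 14 bytes, sizes [7 7]
}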
-// getSize() adds up the sizes of the image's data blobs (which includes the configuration blob), the
-// signatures, and the uncompressed sizes of all of the image's layers.
+func (s *storageImageSource) GetBlob(info types.BlobInfo) (rc io.ReadCloser, n int64, err error) {
+ rc, n, _, err = s.getBlobAndLayerID(info)
+ return rc, n, err
+}
+
+func (s *storageImageSource) getBlobAndLayerID(info types.BlobInfo) (rc io.ReadCloser, n int64, layerID string, err error) {
+ err = info.Digest.Validate()
+ if err != nil {
+ return nil, -1, "", err
+ }
+ if layerList, ok := s.Layers[info.Digest]; !ok || len(layerList) < 1 {
+ b, err := s.imageRef.transport.store.ImageBigData(s.ID, info.Digest.String())
+ if err != nil {
+ return nil, -1, "", err
+ }
+ r := bytes.NewReader(b)
+ logrus.Debugf("exporting opaque data as blob %q", info.Digest.String())
+ return ioutil.NopCloser(r), int64(r.Len()), "", nil
+ }
+ // If the blob was "put" more than once, we have multiple layer IDs
+ // which should all produce the same diff. For the sake of tests that
+ // want to make sure we created different layers each time the blob was
+ // "put", though, cycle through the layers.
+ layerList := s.Layers[info.Digest]
+ position, ok := s.LayerPosition[info.Digest]
+ if !ok {
+ position = 0
+ }
+ s.LayerPosition[info.Digest] = (position + 1) % len(layerList)
+ logrus.Debugf("exporting filesystem layer %q for blob %q", layerList[position], info.Digest)
+ rc, n, err = diffLayer(s.imageRef.transport.store, layerList[position])
+ return rc, n, layerList[position], err
+}
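The LayerPosition map above implements a simple round-robin over the layer IDs recorded for one digest, so repeated GetBlob calls for the same blob cycle through equivalent layers. A self-contained sketch of that rotation; the nextLayer helper is hypothetical and not part of the package:

package main

import "fmt"

// nextLayer returns the layer ID to use for this digest and advances the
// per-digest position so the next call returns the following layer,
// wrapping around at the end of the (non-empty) list.
func nextLayer(positions map[string]int, digest string, layers []string) string {
	position := positions[digest] // zero value 0 when the digest is new
	positions[digest] = (position + 1) % len(layers)
	return layers[position]
}

func main() {
	positions := map[string]int{}
	layers := []string{"layer-a", "layer-b", "layer-c"}
	for i := 0; i < 4; i++ {
		fmt.Println(nextLayer(positions, "sha256:aaa", layers))
	}
	// layer-a, layer-b, layer-c, layer-a
}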
+
+func diffLayer(store storage.Store, layerID string) (rc io.ReadCloser, n int64, err error) {
+ layer, err := store.Layer(layerID)
+ if err != nil {
+ return nil, -1, err
+ }
+ layerMeta := storageLayerMetadata{
+ CompressedSize: -1,
+ }
+ if layer.Metadata != "" {
+ if err := json.Unmarshal([]byte(layer.Metadata), &layerMeta); err != nil {
+ return nil, -1, errors.Wrapf(err, "error decoding metadata for layer %q", layerID)
+ }
+ }
+ if layerMeta.CompressedSize <= 0 {
+ n = -1
+ } else {
+ n = layerMeta.CompressedSize
+ }
+ diff, err := store.Diff("", layer.ID, nil)
+ if err != nil {
+ return nil, -1, err
+ }
+ return diff, n, nil
+}
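The metadata handling in diffLayer boils down to decoding an optional JSON blob and treating a missing or non-positive compressed size as unknown (-1). A minimal sketch of that decoding, with a local struct standing in for storageLayerMetadata; the "compressed-size" JSON tag is an assumption made for illustration:

package main

import (
	"encoding/json"
	"fmt"
)

// layerMetadata is a stand-in for the package's storageLayerMetadata;
// the JSON field name here is assumed, not taken from the library.
type layerMetadata struct {
	CompressedSize int64 `json:"compressed-size,omitempty"`
}

// compressedSize returns the recorded compressed size, or -1 when the
// metadata is absent or the recorded value is not positive.
func compressedSize(metadata string) (int64, error) {
	meta := layerMetadata{CompressedSize: -1}
	if metadata != "" {
		if err := json.Unmarshal([]byte(metadata), &meta); err != nil {
			return -1, err
		}
	}
	if meta.CompressedSize <= 0 {
		return -1, nil
	}
	return meta.CompressedSize, nil
}

func main() {
	n, _ := compressedSize(`{"compressed-size": 2048}`)
	fmt.Println(n) // 2048
	n, _ = compressedSize("")
	fmt.Println(n) // -1
}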
+
+// GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available).
+// It may use a remote (= slow) service.
+// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve (when the primary manifest is a manifest list);
+// this never happens if the primary manifest is not a manifest list (e.g. if the source never returns manifest lists).
+func (s *storageImageSource) GetManifest(instanceDigest *ddigest.Digest) (manifestBlob []byte, MIMEType string, err error) {
+ if instanceDigest != nil {
+ return nil, "", ErrNoManifestLists
+ }
+ manifestBlob, err = s.imageRef.transport.store.ImageBigData(s.ID, "manifest")
+ return manifestBlob, manifest.GuessMIMEType(manifestBlob), err
+}
+
+// GetSignatures returns the image's signatures. It may use a remote (= slow) service.
+// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve signatures for
+// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list
+// (e.g. if the source never returns manifest lists).
+func (s *storageImageSource) GetSignatures(ctx context.Context, instanceDigest *ddigest.Digest) ([][]byte, error) {
+ if instanceDigest != nil {
+ return nil, ErrNoManifestLists
+ }
+ var offset int
+ signature, err := s.imageRef.transport.store.ImageBigData(s.ID, "signatures")
+ if err != nil {
+ return nil, err
+ }
+ sigslice := [][]byte{}
+ for _, length := range s.SignatureSizes {
+ sigslice = append(sigslice, signature[offset:offset+length])
+ offset += length
+ }
+ if offset != len(signature) {
+ return nil, errors.Errorf("signatures data contained %d extra bytes", len(signature)-offset)
+ }
+ return sigslice, nil
+}
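GetSignatures reverses the packing shown earlier: it slices the stored blob back into individual signatures using the recorded sizes and rejects trailing bytes. A standalone sketch of that unpacking (with an added bounds check that the sketch, not the library, provides):

package main

import "fmt"

// unpackSignatures splits blob into len(sizes) signatures, returning an error
// if the recorded sizes do not account for exactly every byte in the blob.
func unpackSignatures(blob []byte, sizes []int) ([][]byte, error) {
	sigs := [][]byte{}
	offset := 0
	for _, length := range sizes {
		if offset+length > len(blob) {
			return nil, fmt.Errorf("signatures data is %d bytes short", offset+length-len(blob))
		}
		sigs = append(sigs, blob[offset:offset+length])
		offset += length
	}
	if offset != len(blob) {
		return nil, fmt.Errorf("signatures data contained %d extra bytes", len(blob)-offset)
	}
	return sigs, nil
}

func main() {
	sigs, err := unpackSignatures([]byte("sig-onesig-two"), []int{7, 7})
	fmt.Println(len(sigs), err) // 2 <nil>
}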
+
func (s *storageImageSource) getSize() (int64, error) {
var sum int64
- // Size up the data blobs.
- dataNames, err := s.imageRef.transport.store.ListImageBigData(s.ID)
+ names, err := s.imageRef.transport.store.ListImageBigData(s.imageRef.id)
if err != nil {
- return -1, errors.Wrapf(err, "error reading image %q", s.ID)
+ return -1, errors.Wrapf(err, "error reading image %q", s.imageRef.id)
}
- for _, dataName := range dataNames {
- bigSize, err := s.imageRef.transport.store.ImageBigDataSize(s.ID, dataName)
+ for _, name := range names {
+ bigSize, err := s.imageRef.transport.store.ImageBigDataSize(s.imageRef.id, name)
if err != nil {
- return -1, errors.Wrapf(err, "error reading data blob size %q for %q", dataName, s.ID)
+ return -1, errors.Wrapf(err, "error reading data blob size %q for %q", name, s.imageRef.id)
}
sum += bigSize
}
- // Add the signature sizes.
for _, sigSize := range s.SignatureSizes {
sum += int64(sigSize)
}
- // Prepare to walk the layer list.
- img, err := s.imageRef.transport.store.Image(s.ID)
- if err != nil {
- return -1, errors.Wrapf(err, "error reading image info %q", s.ID)
- }
- // Walk the layer list.
- layerID := img.TopLayer
- for layerID != "" {
- layer, err := s.imageRef.transport.store.Layer(layerID)
- if err != nil {
- return -1, err
- }
- if layer.UncompressedDigest == "" || layer.UncompressedSize < 0 {
- return -1, errors.Errorf("size for layer %q is unknown, failing getSize()", layerID)
- }
- sum += layer.UncompressedSize
- if layer.Parent == "" {
- break
+ for _, layerList := range s.Layers {
+ for _, layerID := range layerList {
+ layer, err := s.imageRef.transport.store.Layer(layerID)
+ if err != nil {
+ return -1, err
+ }
+ layerMeta := storageLayerMetadata{
+ Size: -1,
+ }
+ if layer.Metadata != "" {
+ if err := json.Unmarshal([]byte(layer.Metadata), &layerMeta); err != nil {
+ return -1, errors.Wrapf(err, "error decoding metadata for layer %q", layerID)
+ }
+ }
+ if layerMeta.Size < 0 {
+ return -1, errors.Errorf("size for layer %q is unknown, failing getSize()", layerID)
+ }
+ sum += layerMeta.Size
}
- layerID = layer.Parent
}
return sum, nil
}
-// newImage creates an image that also knows its size
-func newImage(s storageReference) (types.Image, error) {
+func (s *storageImageCloser) Size() (int64, error) {
+ return s.size, nil
+}
+
+// newImage creates an ImageCloser that also knows its size
+func newImage(ctx *types.SystemContext, s storageReference) (types.ImageCloser, error) {
src, err := newImageSource(s)
if err != nil {
return nil, err
}
- img, err := image.FromSource(src)
+ img, err := image.FromSource(ctx, src)
if err != nil {
return nil, err
}
@@ -871,10 +624,5 @@ func newImage(s storageReference) (types.Image, error) {
if err != nil {
return nil, err
}
- return &storageImage{Image: img, size: size}, nil
-}
-
-// Size() returns the previously-computed size of the image, with no error.
-func (s storageImage) Size() (int64, error) {
- return s.size, nil
+ return &storageImageCloser{ImageCloser: img, size: size}, nil
}
diff --git a/vendor/github.com/containers/image/storage/storage_reference.go b/vendor/github.com/containers/image/storage/storage_reference.go
index b7da47ac5..efad95ce6 100644
--- a/vendor/github.com/containers/image/storage/storage_reference.go
+++ b/vendor/github.com/containers/image/storage/storage_reference.go
@@ -8,7 +8,6 @@ import (
"github.com/containers/image/docker/reference"
"github.com/containers/image/types"
"github.com/containers/storage"
- digest "github.com/opencontainers/go-digest"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -21,11 +20,9 @@ type storageReference struct {
reference string
id string
name reference.Named
- tag string
- digest digest.Digest
}
-func newReference(transport storageTransport, reference, id string, name reference.Named, tag string, digest digest.Digest) *storageReference {
+func newReference(transport storageTransport, reference, id string, name reference.Named) *storageReference {
// We take a copy of the transport, which contains a pointer to the
// store that it used for resolving this reference, so that the
// transport that we'll return from Transport() won't be affected by
@@ -35,8 +32,6 @@ func newReference(transport storageTransport, reference, id string, name referen
reference: reference,
id: id,
name: name,
- tag: tag,
- digest: digest,
}
}
@@ -83,21 +78,8 @@ func (s storageReference) Transport() types.ImageTransport {
}
}
-// Return a name with a tag or digest, if we have either, else return it bare.
+// Return a name with a tag, if we have a name to base it on.
func (s storageReference) DockerReference() reference.Named {
- if s.name == nil {
- return nil
- }
- if s.tag != "" {
- if namedTagged, err := reference.WithTag(s.name, s.tag); err == nil {
- return namedTagged
- }
- }
- if s.digest != "" {
- if canonical, err := reference.WithDigest(s.name, s.digest); err == nil {
- return canonical
- }
- }
return s.name
}
@@ -111,7 +93,7 @@ func (s storageReference) StringWithinTransport() string {
optionsList = ":" + strings.Join(options, ",")
}
storeSpec := "[" + s.transport.store.GraphDriverName() + "@" + s.transport.store.GraphRoot() + "+" + s.transport.store.RunRoot() + optionsList + "]"
- if s.reference == "" {
+ if s.name == nil {
return storeSpec + "@" + s.id
}
if s.id == "" {
@@ -140,8 +122,11 @@ func (s storageReference) PolicyConfigurationNamespaces() []string {
driverlessStoreSpec := "[" + s.transport.store.GraphRoot() + "]"
namespaces := []string{}
if s.name != nil {
- name := reference.TrimNamed(s.name)
- components := strings.Split(name.String(), "/")
+ if s.id != "" {
+ // The reference without the ID is also a valid namespace.
+ namespaces = append(namespaces, storeSpec+s.reference)
+ }
+ components := strings.Split(s.name.Name(), "/")
for len(components) > 0 {
namespaces = append(namespaces, storeSpec+strings.Join(components, "/"))
components = components[:len(components)-1]
@@ -152,8 +137,13 @@ func (s storageReference) PolicyConfigurationNamespaces() []string {
return namespaces
}
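The loop above generates progressively shorter policy namespaces by dropping path components from the right of the repository name. A self-contained illustration of that trimming, using a plain string in place of a reference.Named:

package main

import (
	"fmt"
	"strings"
)

// nameNamespaces returns the full name plus every ancestor namespace obtained
// by trimming one path component at a time, most specific first.
func nameNamespaces(storeSpec, name string) []string {
	namespaces := []string{}
	components := strings.Split(name, "/")
	for len(components) > 0 {
		namespaces = append(namespaces, storeSpec+strings.Join(components, "/"))
		components = components[:len(components)-1]
	}
	return namespaces
}

func main() {
	storeSpec := "[overlay@/var/lib/containers/storage+/var/run/containers/storage]"
	for _, ns := range nameNamespaces(storeSpec, "docker.io/library/busybox") {
		fmt.Println(ns)
	}
	// ...docker.io/library/busybox, ...docker.io/library, ...docker.io
}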
-func (s storageReference) NewImage(ctx *types.SystemContext) (types.Image, error) {
- return newImage(s)
+// NewImage returns a types.ImageCloser for this reference, possibly specialized for this ImageTransport.
+// The caller must call .Close() on the returned ImageCloser.
+// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource,
+// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage.
+// WARNING: This may not do the right thing for a manifest list, see image.FromSource for details.
+func (s storageReference) NewImage(ctx *types.SystemContext) (types.ImageCloser, error) {
+ return newImage(ctx, s)
}
func (s storageReference) DeleteImage(ctx *types.SystemContext) error {
diff --git a/vendor/github.com/containers/image/storage/storage_transport.go b/vendor/github.com/containers/image/storage/storage_transport.go
index d21cf02c8..df4578a8d 100644
--- a/vendor/github.com/containers/image/storage/storage_transport.go
+++ b/vendor/github.com/containers/image/storage/storage_transport.go
@@ -13,14 +13,11 @@ import (
"github.com/containers/image/types"
"github.com/containers/storage"
"github.com/containers/storage/pkg/idtools"
- digest "github.com/opencontainers/go-digest"
+ "github.com/opencontainers/go-digest"
+ ddigest "github.com/opencontainers/go-digest"
"github.com/sirupsen/logrus"
)
-const (
- minimumTruncatedIDLength = 3
-)
-
func init() {
transports.Register(Transport)
}
@@ -106,133 +103,69 @@ func (s *storageTransport) DefaultGIDMap() []idtools.IDMap {
// relative to the given store, and returns it in a reference object.
func (s storageTransport) ParseStoreReference(store storage.Store, ref string) (*storageReference, error) {
var name reference.Named
+ var sum digest.Digest
+ var err error
if ref == "" {
- return nil, errors.Wrapf(ErrInvalidReference, "%q is an empty reference")
+ return nil, ErrInvalidReference
}
if ref[0] == '[' {
// Ignore the store specifier.
closeIndex := strings.IndexRune(ref, ']')
if closeIndex < 1 {
- return nil, errors.Wrapf(ErrInvalidReference, "store specifier in %q did not end", ref)
+ return nil, ErrInvalidReference
}
ref = ref[closeIndex+1:]
}
-
- // The last segment, if there's more than one, is either a digest from a reference, or an image ID.
- split := strings.LastIndex(ref, "@")
- idOrDigest := ""
- if split != -1 {
- // Peel off that last bit so that we can work on the rest.
- idOrDigest = ref[split+1:]
- if idOrDigest == "" {
- return nil, errors.Wrapf(ErrInvalidReference, "%q does not look like a digest or image ID", idOrDigest)
- }
- ref = ref[:split]
- }
-
- // The middle segment (now the last segment), if there is one, is a digest.
- split = strings.LastIndex(ref, "@")
- sum := digest.Digest("")
- if split != -1 {
- sum = digest.Digest(ref[split+1:])
- if sum == "" {
- return nil, errors.Wrapf(ErrInvalidReference, "%q does not look like an image digest", sum)
- }
- ref = ref[:split]
- }
-
- // If we have something that unambiguously should be a digest, validate it, and then the third part,
- // if we have one, as an ID.
- id := ""
- if sum != "" {
- if idSum, err := digest.Parse("sha256:" + idOrDigest); err != nil || idSum.Validate() != nil {
- return nil, errors.Wrapf(ErrInvalidReference, "%q does not look like an image ID", idOrDigest)
- }
- if err := sum.Validate(); err != nil {
- return nil, errors.Wrapf(ErrInvalidReference, "%q does not look like an image digest", sum)
- }
- id = idOrDigest
- if img, err := store.Image(idOrDigest); err == nil && img != nil && len(id) >= minimumTruncatedIDLength {
- // The ID is a truncated version of the ID of an image that's present in local storage,
- // so we might as well use the expanded value.
- id = img.ID
- }
- } else if idOrDigest != "" {
- // There was no middle portion, so the final portion could be either a digest or an ID.
- if idSum, err := digest.Parse("sha256:" + idOrDigest); err == nil && idSum.Validate() == nil {
- // It's an ID.
- id = idOrDigest
- } else if idSum, err := digest.Parse(idOrDigest); err == nil && idSum.Validate() == nil {
- // It's a digest.
- sum = idSum
- } else if img, err := store.Image(idOrDigest); err == nil && img != nil && len(idOrDigest) >= minimumTruncatedIDLength {
- // It's a truncated version of the ID of an image that's present in local storage,
- // and we may need the expanded value.
- id = img.ID
- } else {
- return nil, errors.Wrapf(ErrInvalidReference, "%q does not look like a digest or image ID", idOrDigest)
+ refInfo := strings.SplitN(ref, "@", 2)
+ if len(refInfo) == 1 {
+ // A name.
+ name, err = reference.ParseNormalizedNamed(refInfo[0])
+ if err != nil {
+ return nil, err
}
- }
-
- // If we only had one portion, then _maybe_ it's a truncated image ID. Only check on that if it's
- // at least of what we guess is a reasonable minimum length, because we don't want a really short value
- // like "a" matching an image by ID prefix when the input was actually meant to specify an image name.
- if len(ref) >= minimumTruncatedIDLength && sum == "" && id == "" {
- if img, err := store.Image(idOrDigest); err == nil && img != nil {
- // It's a truncated version of the ID of an image that's present in local storage;
- // we need to expand it.
- id = img.ID
- ref = ""
+ } else if len(refInfo) == 2 {
+ // An ID, possibly preceded by a name.
+ if refInfo[0] != "" {
+ name, err = reference.ParseNormalizedNamed(refInfo[0])
+ if err != nil {
+ return nil, err
+ }
}
- }
-
- // The initial portion is probably a name, possibly with a tag.
- if ref != "" {
- var err error
- if name, err = reference.ParseNormalizedNamed(ref); err != nil {
- return nil, errors.Wrapf(err, "error parsing named reference %q", ref)
+ sum, err = digest.Parse(refInfo[1])
+ if err != nil || sum.Validate() != nil {
+ sum, err = digest.Parse("sha256:" + refInfo[1])
+ if err != nil || sum.Validate() != nil {
+ return nil, err
+ }
}
+ } else { // Coverage: len(refInfo) is always 1 or 2
+ // Anything else: store specified in a form we don't
+ // recognize.
+ return nil, ErrInvalidReference
}
- if name == nil && sum == "" && id == "" {
- return nil, errors.Errorf("error parsing reference")
- }
-
- // Construct a copy of the store spec.
optionsList := ""
options := store.GraphOptions()
if len(options) > 0 {
optionsList = ":" + strings.Join(options, ",")
}
storeSpec := "[" + store.GraphDriverName() + "@" + store.GraphRoot() + "+" + store.RunRoot() + optionsList + "]"
-
- // Convert the name back into a reference string, if we got a name.
+ id := ""
+ if sum.Validate() == nil {
+ id = sum.Hex()
+ }
refname := ""
- tag := ""
if name != nil {
- if sum.Validate() == nil {
- canonical, err := reference.WithDigest(name, sum)
- if err != nil {
- return nil, errors.Wrapf(err, "error mixing name %q with digest %q", name, sum)
- }
- refname = verboseName(canonical)
- } else {
- name = reference.TagNameOnly(name)
- tagged, ok := name.(reference.Tagged)
- if !ok {
- return nil, errors.Errorf("error parsing possibly-tagless name %q", ref)
- }
- refname = verboseName(name)
- tag = tagged.Tag()
- }
+ name = reference.TagNameOnly(name)
+ refname = verboseName(name)
}
if refname == "" {
- logrus.Debugf("parsed reference into %q", storeSpec+"@"+id)
+ logrus.Debugf("parsed reference to id into %q", storeSpec+"@"+id)
} else if id == "" {
- logrus.Debugf("parsed reference into %q", storeSpec+refname)
+ logrus.Debugf("parsed reference to refname into %q", storeSpec+refname)
} else {
- logrus.Debugf("parsed reference into %q", storeSpec+refname+"@"+id)
+ logrus.Debugf("parsed reference to refname@id into %q", storeSpec+refname+"@"+id)
}
- return newReference(storageTransport{store: store, defaultUIDMap: s.defaultUIDMap, defaultGIDMap: s.defaultGIDMap}, refname, id, name, tag, sum), nil
+ return newReference(storageTransport{store: store, defaultUIDMap: s.defaultUIDMap, defaultGIDMap: s.defaultGIDMap}, refname, id, name), nil
}
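After this rewrite, the reference grammar accepted by ParseStoreReference is essentially "name", "@id", or "name@id", optionally prefixed by a store specifier. A simplified, standard-library-only sketch of that split; validation of the name and the ID is deliberately omitted here, whereas the real code uses reference.ParseNormalizedNamed and digest.Parse:

package main

import (
	"fmt"
	"strings"
)

// splitStoreReference splits "name", "@id", or "name@id" into its parts.
// This is an illustrative simplification, not the library's parser.
func splitStoreReference(ref string) (name, id string, err error) {
	if ref == "" {
		return "", "", fmt.Errorf("empty reference")
	}
	parts := strings.SplitN(ref, "@", 2)
	name = parts[0]
	if len(parts) == 2 {
		id = parts[1]
		if id == "" {
			return "", "", fmt.Errorf("empty image ID in %q", ref)
		}
	}
	if name == "" && id == "" {
		return "", "", fmt.Errorf("invalid reference %q", ref)
	}
	return name, id, nil
}

func main() {
	for _, ref := range []string{"busybox", "@0123abcd", "busybox@0123abcd"} {
		name, id, _ := splitStoreReference(ref)
		fmt.Printf("name=%q id=%q\n", name, id)
	}
}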
func (s *storageTransport) GetStore() (storage.Store, error) {
@@ -251,14 +184,11 @@ func (s *storageTransport) GetStore() (storage.Store, error) {
return s.store, nil
}
-// ParseReference takes a name and a tag or digest and/or ID
-// ("_name_"/"@_id_"/"_name_:_tag_"/"_name_:_tag_@_id_"/"_name_@_digest_"/"_name_@_digest_@_id_"),
+// ParseReference takes a name and/or an ID ("_name_"/"@_id_"/"_name_@_id_"),
// possibly prefixed with a store specifier in the form "[_graphroot_]" or
// "[_driver_@_graphroot_]" or "[_driver_@_graphroot_+_runroot_]" or
// "[_driver_@_graphroot_:_options_]" or "[_driver_@_graphroot_+_runroot_:_options_]",
// tries to figure out which it is, and returns it in a reference object.
-// If _id_ is the ID of an image that's present in local storage, it can be truncated, and
-// even be specified as if it were a _name_, value.
func (s *storageTransport) ParseReference(reference string) (types.ImageReference, error) {
var store storage.Store
// Check if there's a store location prefix. If there is, then it
@@ -407,7 +337,7 @@ func (s storageTransport) ValidatePolicyConfigurationScope(scope string) error {
if err != nil {
return err
}
- _, err = digest.Parse("sha256:" + scopeInfo[1])
+ _, err = ddigest.Parse("sha256:" + scopeInfo[1])
if err != nil {
return err
}
@@ -417,28 +347,11 @@ func (s storageTransport) ValidatePolicyConfigurationScope(scope string) error {
return nil
}
-func verboseName(r reference.Reference) string {
- if r == nil {
- return ""
- }
- named, isNamed := r.(reference.Named)
- digested, isDigested := r.(reference.Digested)
- tagged, isTagged := r.(reference.Tagged)
- name := ""
+func verboseName(name reference.Named) string {
+ name = reference.TagNameOnly(name)
tag := ""
- sum := ""
- if isNamed {
- name = (reference.TrimNamed(named)).String()
- }
- if isTagged {
- if tagged.Tag() != "" {
- tag = ":" + tagged.Tag()
- }
- }
- if isDigested {
- if digested.Digest().Validate() == nil {
- sum = "@" + digested.Digest().String()
- }
+ if tagged, ok := name.(reference.NamedTagged); ok {
+ tag = ":" + tagged.Tag()
}
- return name + tag + sum
+ return name.Name() + tag
}
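For context, verboseName now just forces a tag onto the name and renders it as name:tag, so a normalized "docker.io/library/busybox" comes out as "docker.io/library/busybox:latest". A rough, standard-library-only approximation of that behavior for names that are already normalized (assumption: the tag, when present, is the final colon-separated segment and contains no slash):

package main

import (
	"fmt"
	"strings"
)

// verboseNameSketch appends the default "latest" tag when no tag is present.
// Registry ports (e.g. "localhost:5000/foo") are not mistaken for tags
// because the text after that colon still contains a slash.
func verboseNameSketch(name string) string {
	if i := strings.LastIndex(name, ":"); i == -1 || strings.Contains(name[i:], "/") {
		return name + ":latest"
	}
	return name
}

func main() {
	fmt.Println(verboseNameSketch("docker.io/library/busybox"))      // docker.io/library/busybox:latest
	fmt.Println(verboseNameSketch("docker.io/library/busybox:1.29")) // docker.io/library/busybox:1.29
}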
diff --git a/vendor/github.com/containers/image/tarball/tarball_reference.go b/vendor/github.com/containers/image/tarball/tarball_reference.go
index 18967041a..4ccfb4063 100644
--- a/vendor/github.com/containers/image/tarball/tarball_reference.go
+++ b/vendor/github.com/containers/image/tarball/tarball_reference.go
@@ -61,12 +61,17 @@ func (r *tarballReference) PolicyConfigurationNamespaces() []string {
return nil
}
-func (r *tarballReference) NewImage(ctx *types.SystemContext) (types.Image, error) {
+// NewImage returns a types.ImageCloser for this reference, possibly specialized for this ImageTransport.
+// The caller must call .Close() on the returned ImageCloser.
+// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource,
+// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage.
+// WARNING: This may not do the right thing for a manifest list, see image.FromSource for details.
+func (r *tarballReference) NewImage(ctx *types.SystemContext) (types.ImageCloser, error) {
src, err := r.NewImageSource(ctx)
if err != nil {
return nil, err
}
- img, err := image.FromSource(src)
+ img, err := image.FromSource(ctx, src)
if err != nil {
src.Close()
return nil, err
diff --git a/vendor/github.com/containers/image/tarball/tarball_src.go b/vendor/github.com/containers/image/tarball/tarball_src.go
index 22b98c16c..872a446a1 100644
--- a/vendor/github.com/containers/image/tarball/tarball_src.go
+++ b/vendor/github.com/containers/image/tarball/tarball_src.go
@@ -228,18 +228,28 @@ func (is *tarballImageSource) GetBlob(blobinfo types.BlobInfo) (io.ReadCloser, i
return nil, -1, fmt.Errorf("no blob with digest %q found", blobinfo.Digest.String())
}
-func (is *tarballImageSource) GetManifest() ([]byte, string, error) {
+// GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available).
+// It may use a remote (= slow) service.
+// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve (when the primary manifest is a manifest list);
+// this never happens if the primary manifest is not a manifest list (e.g. if the source never returns manifest lists).
+func (is *tarballImageSource) GetManifest(instanceDigest *digest.Digest) ([]byte, string, error) {
+ if instanceDigest != nil {
+ return nil, "", fmt.Errorf("manifest lists are not supported by the %q transport", transportName)
+ }
return is.manifest, imgspecv1.MediaTypeImageManifest, nil
}
-func (*tarballImageSource) GetSignatures(context.Context) ([][]byte, error) {
+// GetSignatures returns the image's signatures. It may use a remote (= slow) service.
+// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve signatures for
+// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list
+// (e.g. if the source never returns manifest lists).
+func (*tarballImageSource) GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) {
+ if instanceDigest != nil {
+ return nil, fmt.Errorf("manifest lists are not supported by the %q transport", transportName)
+ }
return nil, nil
}
-func (*tarballImageSource) GetTargetManifest(digest.Digest) ([]byte, string, error) {
- return nil, "", fmt.Errorf("manifest lists are not supported by the %q transport", transportName)
-}
-
func (is *tarballImageSource) Reference() types.ImageReference {
return &is.reference
}
diff --git a/vendor/github.com/containers/image/transports/alltransports/storage_stub.go b/vendor/github.com/containers/image/transports/alltransports/storage_stub.go
index 1b7b005eb..4ac684e58 100644
--- a/vendor/github.com/containers/image/transports/alltransports/storage_stub.go
+++ b/vendor/github.com/containers/image/transports/alltransports/storage_stub.go
@@ -5,5 +5,5 @@ package alltransports
import "github.com/containers/image/transports"
func init() {
- transports.Register(transports.NewStubTransport("storage"))
+ transports.Register(transports.NewStubTransport("containers-storage"))
}
diff --git a/vendor/github.com/containers/image/types/types.go b/vendor/github.com/containers/image/types/types.go
index bae7319ac..176887480 100644
--- a/vendor/github.com/containers/image/types/types.go
+++ b/vendor/github.com/containers/image/types/types.go
@@ -73,11 +73,12 @@ type ImageReference interface {
// and each following element to be a prefix of the element preceding it.
PolicyConfigurationNamespaces() []string
- // NewImage returns a types.Image for this reference, possibly specialized for this ImageTransport.
- // The caller must call .Close() on the returned Image.
+ // NewImage returns a types.ImageCloser for this reference, possibly specialized for this ImageTransport.
+ // The caller must call .Close() on the returned ImageCloser.
// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource,
// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage.
- NewImage(ctx *SystemContext) (Image, error)
+ // WARNING: This may not do the right thing for a manifest list, see image.FromSource for details.
+ NewImage(ctx *SystemContext) (ImageCloser, error)
// NewImageSource returns a types.ImageSource for this reference.
// The caller must call .Close() on the returned ImageSource.
NewImageSource(ctx *SystemContext) (ImageSource, error)
@@ -99,7 +100,7 @@ type BlobInfo struct {
MediaType string
}
-// ImageSource is a service, possibly remote (= slow), to download components of a single image.
+// ImageSource is a service, possibly remote (= slow), to download components of a single image or a named image set (manifest list).
// This is primarily useful for copying images around; for examining their properties, Image (below)
// is usually more useful.
// Each ImageSource should eventually be closed by calling Close().
@@ -114,19 +115,17 @@ type ImageSource interface {
Close() error
// GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available).
// It may use a remote (= slow) service.
- GetManifest() ([]byte, string, error)
- // GetTargetManifest returns an image's manifest given a digest. This is mainly used to retrieve a single image's manifest
- // out of a manifest list.
- GetTargetManifest(digest digest.Digest) ([]byte, string, error)
+ // If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve (when the primary manifest is a manifest list);
+ // this never happens if the primary manifest is not a manifest list (e.g. if the source never returns manifest lists).
+ GetManifest(instanceDigest *digest.Digest) ([]byte, string, error)
// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown).
// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
GetBlob(BlobInfo) (io.ReadCloser, int64, error)
// GetSignatures returns the image's signatures. It may use a remote (= slow) service.
- GetSignatures(context.Context) ([][]byte, error)
- // UpdatedLayerInfos returns either nil (meaning there are no updates), or updated values for the layer blobsums that are listed in the image's manifest.
- // The Digest field is guaranteed to be provided; Size may be -1.
- // WARNING: The list may contain duplicates, and they are semantically relevant.
- UpdatedLayerInfos() []BlobInfo
+ // If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve signatures for
+ // (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list
+ // (e.g. if the source never returns manifest lists).
+ GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error)
}
// ImageDestination is a service, possibly remote (= slow), to store components of a single image.
@@ -200,25 +199,24 @@ func (e ManifestTypeRejectedError) Error() string {
// Thus, an UnparsedImage can be created from an ImageSource simply by fetching blobs without interpreting them,
// allowing cryptographic signature verification to happen first, before even fetching the manifest, or parsing anything else.
// This also makes the UnparsedImage→Image conversion an explicitly visible step.
-// Each UnparsedImage should eventually be closed by calling Close().
+//
+// An UnparsedImage is a pair of (ImageSource, instance digest); it can represent either a manifest list or a single image instance.
+//
+// The UnparsedImage must not be used after the underlying ImageSource is Close()d.
type UnparsedImage interface {
// Reference returns the reference used to set up this source, _as specified by the user_
// (not as the image itself, or its underlying storage, claims). This can be used e.g. to determine which public keys are trusted for this image.
Reference() ImageReference
- // Close removes resources associated with an initialized UnparsedImage, if any.
- Close() error
// Manifest is like ImageSource.GetManifest, but the result is cached; it is OK to call this however often you need.
Manifest() ([]byte, string, error)
// Signatures is like ImageSource.GetSignatures, but the result is cached; it is OK to call this however often you need.
Signatures(ctx context.Context) ([][]byte, error)
- // UpdatedLayerInfos returns either nil (meaning there are no updates), or updated values for the layer blobsums that are listed in the image's manifest.
- // The Digest field is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
- // WARNING: The list may contain duplicates, and they are semantically relevant.
- UpdatedLayerInfos() []BlobInfo
}
// Image is the primary API for inspecting properties of images.
-// Each Image should eventually be closed by calling Close().
+// An Image is based on a pair of (ImageSource, instance digest); it can represent either a manifest list or a single image instance.
+//
+// The Image must not be used after the underlying ImageSource is Close()d.
type Image interface {
// Note that Reference may return nil in the return value of UpdatedImage!
UnparsedImage
@@ -250,13 +248,20 @@ type Image interface {
// Everything in options.InformationOnly should be provided, other fields should be set only if a modification is desired.
// This does not change the state of the original Image object.
UpdatedImage(options ManifestUpdateOptions) (Image, error)
- // IsMultiImage returns true if the image's manifest is a list of images, false otherwise.
- IsMultiImage() bool
// Size returns an approximation of the amount of disk space which is consumed by the image in its current
// location. If the size is not known, -1 will be returned.
Size() (int64, error)
}
+// ImageCloser is an Image with a Close() method which must be called by the user.
+// This is returned by ImageReference.NewImage, which transparently instantiates a types.ImageSource,
+// to ensure that the ImageSource is closed.
+type ImageCloser interface {
+ Image
+ // Close removes resources associated with an initialized ImageCloser.
+ Close() error
+}
+
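The ImageCloser split above makes the ownership of the underlying ImageSource explicit: NewImage opens it, and the caller must Close it. A sketch of the intended call pattern against this revision of the API, assuming the alltransports.ParseImageName helper from this library and a reachable image; error handling is kept minimal:

package main

import (
	"fmt"

	"github.com/containers/image/transports/alltransports"
	"github.com/containers/image/types"
)

func main() {
	// Parse a reference; the transport prefix selects the implementation.
	ref, err := alltransports.ParseImageName("docker://busybox:latest")
	if err != nil {
		panic(err)
	}
	ctx := &types.SystemContext{}
	// NewImage now returns an ImageCloser; the caller owns the Close().
	img, err := ref.NewImage(ctx)
	if err != nil {
		panic(err)
	}
	defer img.Close()
	size, err := img.Size()
	fmt.Println(size, err)
}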
// ManifestUpdateOptions is a way to pass named optional arguments to Image.UpdatedManifest
type ManifestUpdateOptions struct {
LayerInfos []BlobInfo // Complete BlobInfos (size+digest+urls+annotations) which should replace the originals, in order (the root layer first, and then successive layered layers). BlobInfos' MediaType fields are ignored.
@@ -293,7 +298,7 @@ type DockerAuthConfig struct {
Password string
}
-// SystemContext allows parametrizing access to implicitly-accessed resources,
+// SystemContext allows parameterizing access to implicitly-accessed resources,
// like configuration files in /etc and users' login state in their home directory.
// Various components can share the same field only if their semantics is exactly
// the same; if in doubt, add a new field.
@@ -316,6 +321,10 @@ type SystemContext struct {
SystemRegistriesConfPath string
// If not "", overrides the default path for the authentication file
AuthFilePath string
+ // If not "", overrides the use of platform.GOARCH when choosing an image or verifying architecture match.
+ ArchitectureChoice string
+ // If not "", overrides the use of platform.GOOS when choosing an image or verifying OS match.
+ OSChoice string
// === OCI.Transport overrides ===
// If not "", a directory containing a CA certificate (ending with ".crt"),
@@ -324,6 +333,8 @@ type SystemContext struct {
OCICertPath string
// Allow downloading OCI image layers over HTTP, or HTTPS with failed TLS verification. Note that this does not affect other TLS connections.
OCIInsecureSkipTLSVerify bool
+ // If not "", use a shared directory for storing blobs rather than within OCI layouts
+ OCISharedBlobDirPath string
// === docker.Transport overrides ===
// If not "", a directory containing a CA certificate (ending with ".crt"),
@@ -332,8 +343,9 @@ type SystemContext struct {
DockerCertPath string
// If not "", overrides the system’s default path for a directory containing host[:port] subdirectories with the same structure as DockerCertPath above.
// Ignored if DockerCertPath is non-empty.
- DockerPerHostCertDirPath string
- DockerInsecureSkipTLSVerify bool // Allow contacting docker registries over HTTP, or HTTPS with failed TLS verification. Note that this does not affect other TLS connections.
+ DockerPerHostCertDirPath string
+ // Allow contacting docker registries over HTTP, or HTTPS with failed TLS verification. Note that this does not affect other TLS connections.
+ DockerInsecureSkipTLSVerify bool
// if nil, the library tries to parse ~/.docker/config.json to retrieve credentials
DockerAuthConfig *DockerAuthConfig
// if not "", an User-Agent header is added to each request when contacting a registry.
@@ -344,6 +356,20 @@ type SystemContext struct {
DockerDisableV1Ping bool
// Directory to use for OSTree temporary files
OSTreeTmpDirPath string
+
+ // === docker/daemon.Transport overrides ===
+ // A directory containing a CA certificate (ending with ".crt"),
+ // a client certificate (ending with ".cert") and a client certificate key
+ // (ending with ".key") used when talking to a Docker daemon.
+ DockerDaemonCertPath string
+	// The hostname or IP address of the Docker daemon. If not set (i.e. ""), client.DefaultDockerHost is assumed.
+	DockerDaemonHost string
+	// Used to skip TLS verification; off by default. To take effect, DockerDaemonCertPath needs to be specified as well.
+	DockerDaemonInsecureSkipTLSVerify bool
+
+ // === dir.Transport overrides ===
+ // DirForceCompress compresses the image layers if set to true
+ DirForceCompress bool
}
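The new SystemContext fields are plain knobs set by the caller before invoking a transport. A short sketch of constructing one with a few of the overrides added in this change; the values are illustrative only:

package main

import (
	"fmt"

	"github.com/containers/image/types"
)

func main() {
	ctx := &types.SystemContext{
		// Platform overrides added above.
		OSChoice:           "linux",
		ArchitectureChoice: "amd64",
		// docker/daemon.Transport override added above.
		DockerDaemonHost: "unix:///var/run/docker.sock",
		// dir.Transport override added above.
		DirForceCompress: true,
	}
	fmt.Printf("%+v\n", ctx)
}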
// ProgressProperties is used to pass information from the copy code to a monitor which
diff --git a/vendor/github.com/containers/image/vendor.conf b/vendor/github.com/containers/image/vendor.conf
index 3263f5800..20b3f7e1a 100644
--- a/vendor/github.com/containers/image/vendor.conf
+++ b/vendor/github.com/containers/image/vendor.conf
@@ -1,5 +1,5 @@
github.com/sirupsen/logrus v1.0.0
-github.com/containers/storage 9e0c323a4b425557f8310ee8d125634acd39d8f5
+github.com/containers/storage 47536c89fcc545a87745e1a1573addc439409165
github.com/davecgh/go-spew 346938d642f2ec3594ed81d874461961cd0faa76
github.com/docker/docker-credential-helpers d68f9aeca33f5fd3f08eeae5e9d175edf4e731d1
github.com/docker/distribution 5f6282db7d65e6d72ad7c2cc66310724a57be716
@@ -22,7 +22,7 @@ github.com/pborman/uuid 1b00554d822231195d1babd97ff4a781231955c9
github.com/pkg/errors 248dadf4e9068a0b3e79f02ed0a610d935de5302
github.com/pmezard/go-difflib 792786c7400a136282c1664665ae0a8db921c6c2
github.com/stretchr/testify 4d4bfba8f1d1027c4fdbe371823030df51419987
-github.com/vbatts/tar-split bd4c5d64c3e9297f410025a3b1bd0c58f659e721
+github.com/vbatts/tar-split v0.10.2
golang.org/x/crypto 453249f01cfeb54c3d549ddb75ff152ca243f9d8
golang.org/x/net 6b27048ae5e6ad1ef927e72e437531493de612fe
golang.org/x/sys 43e60d72a8e2bd92ee98319ba9a384a0e9837c08
@@ -36,5 +36,4 @@ github.com/tchap/go-patricia v2.2.6
github.com/opencontainers/selinux ba1aefe8057f1d0cfb8e88d0ec1dc85925ef987d
github.com/BurntSushi/toml b26d9c308763d68093482582cea63d69be07a0f0
github.com/ostreedev/ostree-go aeb02c6b6aa2889db3ef62f7855650755befd460
-github.com/gogo/protobuf fcdc5011193ff531a548e9b0301828d5a5b97fd8
-github.com/pquerna/ffjson master
+github.com/gogo/protobuf/proto fcdc5011193ff531a548e9b0301828d5a5b97fd8