summaryrefslogtreecommitdiff
path: root/vendor
diff options
context:
space:
mode:
Diffstat (limited to 'vendor')
-rw-r--r--vendor/github.com/containers/image/copy/copy.go101
-rw-r--r--vendor/github.com/containers/image/directory/directory_dest.go27
-rw-r--r--vendor/github.com/containers/image/directory/directory_src.go4
-rw-r--r--vendor/github.com/containers/image/docker/cache.go23
-rw-r--r--vendor/github.com/containers/image/docker/docker_client.go59
-rw-r--r--vendor/github.com/containers/image/docker/docker_image_dest.go159
-rw-r--r--vendor/github.com/containers/image/docker/docker_image_src.go5
-rw-r--r--vendor/github.com/containers/image/docker/tarfile/dest.go37
-rw-r--r--vendor/github.com/containers/image/docker/tarfile/src.go4
-rw-r--r--vendor/github.com/containers/image/image/docker_schema2.go7
-rw-r--r--vendor/github.com/containers/image/image/oci.go3
-rw-r--r--vendor/github.com/containers/image/oci/archive/oci_dest.go31
-rw-r--r--vendor/github.com/containers/image/oci/archive/oci_src.go8
-rw-r--r--vendor/github.com/containers/image/oci/layout/oci_dest.go33
-rw-r--r--vendor/github.com/containers/image/oci/layout/oci_src.go6
-rw-r--r--vendor/github.com/containers/image/openshift/openshift.go34
-rw-r--r--vendor/github.com/containers/image/ostree/ostree_dest.go36
-rw-r--r--vendor/github.com/containers/image/ostree/ostree_src.go6
-rw-r--r--vendor/github.com/containers/image/pkg/blobinfocache/boltdb.go329
-rw-r--r--vendor/github.com/containers/image/pkg/blobinfocache/default.go63
-rw-r--r--vendor/github.com/containers/image/pkg/blobinfocache/memory.go123
-rw-r--r--vendor/github.com/containers/image/pkg/blobinfocache/none.go47
-rw-r--r--vendor/github.com/containers/image/pkg/blobinfocache/prioritize.go108
-rw-r--r--vendor/github.com/containers/image/storage/storage_image.go110
-rw-r--r--vendor/github.com/containers/image/tarball/tarball_src.go5
-rw-r--r--vendor/github.com/containers/image/types/types.go102
-rw-r--r--vendor/github.com/containers/image/vendor.conf1
27 files changed, 1242 insertions, 229 deletions
diff --git a/vendor/github.com/containers/image/copy/copy.go b/vendor/github.com/containers/image/copy/copy.go
index 313d802b3..013080e8d 100644
--- a/vendor/github.com/containers/image/copy/copy.go
+++ b/vendor/github.com/containers/image/copy/copy.go
@@ -13,6 +13,7 @@ import (
"time"
"github.com/containers/image/image"
+ "github.com/containers/image/pkg/blobinfocache"
"github.com/containers/image/pkg/compression"
"github.com/containers/image/signature"
"github.com/containers/image/transports"
@@ -24,14 +25,16 @@ import (
)
type digestingReader struct {
- source io.Reader
- digester digest.Digester
- expectedDigest digest.Digest
- validationFailed bool
+ source io.Reader
+ digester digest.Digester
+ expectedDigest digest.Digest
+ validationFailed bool
+ validationSucceeded bool
}
// newDigestingReader returns an io.Reader implementation with contents of source, which will eventually return a non-EOF error
-// and set validationFailed to true if the source stream does not match expectedDigest.
+// or set validationSucceeded/validationFailed to true if the source stream does/does not match expectedDigest.
+// (neither is set if EOF is never reached).
func newDigestingReader(source io.Reader, expectedDigest digest.Digest) (*digestingReader, error) {
if err := expectedDigest.Validate(); err != nil {
return nil, errors.Errorf("Invalid digest specification %s", expectedDigest)
@@ -64,6 +67,7 @@ func (d *digestingReader) Read(p []byte) (int, error) {
d.validationFailed = true
return 0, errors.Errorf("Digest did not match, expected %s, got %s", d.expectedDigest, actualDigest)
}
+ d.validationSucceeded = true
}
return n, err
}
@@ -71,21 +75,22 @@ func (d *digestingReader) Read(p []byte) (int, error) {
// copier allows us to keep track of diffID values for blobs, and other
// data shared across one or more images in a possible manifest list.
type copier struct {
- cachedDiffIDs map[digest.Digest]digest.Digest
dest types.ImageDestination
rawSource types.ImageSource
reportWriter io.Writer
progressInterval time.Duration
progress chan types.ProgressProperties
+ blobInfoCache types.BlobInfoCache
}
// imageCopier tracks state specific to a single image (possibly an item of a manifest list)
type imageCopier struct {
- c *copier
- manifestUpdates *types.ManifestUpdateOptions
- src types.Image
- diffIDsAreNeeded bool
- canModifyManifest bool
+ c *copier
+ manifestUpdates *types.ManifestUpdateOptions
+ src types.Image
+ diffIDsAreNeeded bool
+ canModifyManifest bool
+ canSubstituteBlobs bool
}
// Options allows supplying non-default configuration modifying the behavior of CopyImage.
@@ -141,12 +146,15 @@ func Image(ctx context.Context, policyContext *signature.PolicyContext, destRef,
}()
c := &copier{
- cachedDiffIDs: make(map[digest.Digest]digest.Digest),
dest: dest,
rawSource: rawSource,
reportWriter: reportWriter,
progressInterval: options.ProgressInterval,
progress: options.Progress,
+ // FIXME? The cache is used for sources and destinations equally, but we only have a SourceCtx and DestinationCtx.
+ // For now, use DestinationCtx (because blob reuse changes the behavior of the destination side more); eventually
+ // we might want to add a separate CommonCtx — or would that be too confusing?
+ blobInfoCache: blobinfocache.DefaultCache(options.DestinationCtx),
}
unparsedToplevel := image.UnparsedInstance(rawSource, nil)
@@ -235,6 +243,13 @@ func (c *copier) copyOneImage(ctx context.Context, policyContext *signature.Poli
src: src,
// diffIDsAreNeeded is computed later
canModifyManifest: len(sigs) == 0,
+ // Ensure _this_ copy sees exactly the intended data when either processing a signed image or signing it.
+ // This may be too conservative, but for now, better safe than sorry, _especially_ on the SignBy path:
+ // The signature makes the content non-repudiable, so it very much matters that the signature is made over exactly what the user intended.
+ // We do intend the RecordDigestUncompressedPair calls to only work with reliable data, but at least there’s a risk
+ // that the compressed version coming from a third party may be designed to attack some other decompressor implementation,
+ // and we would reuse and sign it.
+ canSubstituteBlobs: len(sigs) == 0 && options.SignBy == "",
}
if err := ic.updateEmbeddedDockerReference(); err != nil {
@@ -498,32 +513,24 @@ type diffIDResult struct {
// copyLayer copies a layer with srcInfo (with known Digest and possibly known Size) in src to dest, perhaps compressing it if canCompress,
// and returns a complete blobInfo of the copied layer, and a value for LayerDiffIDs if diffIDIsNeeded
func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo) (types.BlobInfo, digest.Digest, error) {
- // Check if we already have a blob with this digest
- haveBlob, extantBlobSize, err := ic.c.dest.HasBlob(ctx, srcInfo)
- if err != nil {
- return types.BlobInfo{}, "", errors.Wrapf(err, "Error checking for blob %s at destination", srcInfo.Digest)
- }
- // If we already have a cached diffID for this blob, we don't need to compute it
- diffIDIsNeeded := ic.diffIDsAreNeeded && (ic.c.cachedDiffIDs[srcInfo.Digest] == "")
- // If we already have the blob, and we don't need to recompute the diffID, then we might be able to avoid reading it again
- if haveBlob && !diffIDIsNeeded {
- // Check the blob sizes match, if we were given a size this time
- if srcInfo.Size != -1 && srcInfo.Size != extantBlobSize {
- return types.BlobInfo{}, "", errors.Errorf("Error: blob %s is already present, but with size %d instead of %d", srcInfo.Digest, extantBlobSize, srcInfo.Size)
- }
- srcInfo.Size = extantBlobSize
- // Tell the image destination that this blob's delta is being applied again. For some image destinations, this can be faster than using GetBlob/PutBlob
- blobinfo, err := ic.c.dest.ReapplyBlob(ctx, srcInfo)
+ cachedDiffID := ic.c.blobInfoCache.UncompressedDigest(srcInfo.Digest) // May be ""
+ diffIDIsNeeded := ic.diffIDsAreNeeded && cachedDiffID == ""
+
+ // If we already have the blob, and we don't need to compute the diffID, then we don't need to read it from the source.
+ if !diffIDIsNeeded {
+ reused, blobInfo, err := ic.c.dest.TryReusingBlob(ctx, srcInfo, ic.c.blobInfoCache, ic.canSubstituteBlobs)
if err != nil {
- return types.BlobInfo{}, "", errors.Wrapf(err, "Error reapplying blob %s at destination", srcInfo.Digest)
+ return types.BlobInfo{}, "", errors.Wrapf(err, "Error trying to reuse blob %s at destination", srcInfo.Digest)
+ }
+ if reused {
+ ic.c.Printf("Skipping fetch of repeat blob %s\n", srcInfo.Digest)
+ return blobInfo, cachedDiffID, nil
}
- ic.c.Printf("Skipping fetch of repeat blob %s\n", srcInfo.Digest)
- return blobinfo, ic.c.cachedDiffIDs[srcInfo.Digest], err
}
// Fallback: copy the layer, computing the diffID if we need to do so
ic.c.Printf("Copying blob %s\n", srcInfo.Digest)
- srcStream, srcBlobSize, err := ic.c.rawSource.GetBlob(ctx, srcInfo)
+ srcStream, srcBlobSize, err := ic.c.rawSource.GetBlob(ctx, srcInfo, ic.c.blobInfoCache)
if err != nil {
return types.BlobInfo{}, "", errors.Wrapf(err, "Error reading blob %s", srcInfo.Digest)
}
@@ -543,11 +550,13 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo) (t
return types.BlobInfo{}, "", errors.Wrap(diffIDResult.err, "Error computing layer DiffID")
}
logrus.Debugf("Computed DiffID %s for layer %s", diffIDResult.digest, srcInfo.Digest)
- ic.c.cachedDiffIDs[srcInfo.Digest] = diffIDResult.digest
+ // This is safe because we have just computed diffIDResult.Digest ourselves, and in the process
+ // we have read all of the input blob, so srcInfo.Digest must have been validated by digestingReader.
+ ic.c.blobInfoCache.RecordDigestUncompressedPair(srcInfo.Digest, diffIDResult.digest)
return blobInfo, diffIDResult.digest, nil
}
} else {
- return blobInfo, ic.c.cachedDiffIDs[srcInfo.Digest], nil
+ return blobInfo, cachedDiffID, nil
}
}
@@ -624,7 +633,7 @@ func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, sr
// === Process input through digestingReader to validate against the expected digest.
// Be paranoid; in case PutBlob somehow managed to ignore an error from digestingReader,
// use a separate validation failure indicator.
- // Note that we don't use a stronger "validationSucceeded" indicator, because
+ // Note that for this check we don't use the stronger "validationSucceeded" indicator, because
// dest.PutBlob may detect that the layer already exists, in which case we don't
// read stream to the end, and validation does not happen.
digestingReader, err := newDigestingReader(srcStream, srcInfo.Digest)
@@ -660,8 +669,10 @@ func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, sr
// === Deal with layer compression/decompression if necessary
var inputInfo types.BlobInfo
+ var compressionOperation types.LayerCompression
if canModifyBlob && c.dest.DesiredLayerCompression() == types.Compress && !isCompressed {
logrus.Debugf("Compressing blob on the fly")
+ compressionOperation = types.Compress
pipeReader, pipeWriter := io.Pipe()
defer pipeReader.Close()
@@ -674,6 +685,7 @@ func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, sr
inputInfo.Size = -1
} else if canModifyBlob && c.dest.DesiredLayerCompression() == types.Decompress && isCompressed {
logrus.Debugf("Blob will be decompressed")
+ compressionOperation = types.Decompress
s, err := decompressor(destStream)
if err != nil {
return types.BlobInfo{}, err
@@ -684,6 +696,7 @@ func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, sr
inputInfo.Size = -1
} else {
logrus.Debugf("Using original blob without modification")
+ compressionOperation = types.PreserveOriginal
inputInfo = srcInfo
}
@@ -699,7 +712,7 @@ func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, sr
}
// === Finally, send the layer stream to dest.
- uploadedInfo, err := c.dest.PutBlob(ctx, destStream, inputInfo, isConfig)
+ uploadedInfo, err := c.dest.PutBlob(ctx, destStream, inputInfo, c.blobInfoCache, isConfig)
if err != nil {
return types.BlobInfo{}, errors.Wrap(err, "Error writing blob")
}
@@ -722,6 +735,22 @@ func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, sr
if inputInfo.Digest != "" && uploadedInfo.Digest != inputInfo.Digest {
return types.BlobInfo{}, errors.Errorf("Internal error writing blob %s, blob with digest %s saved with digest %s", srcInfo.Digest, inputInfo.Digest, uploadedInfo.Digest)
}
+ if digestingReader.validationSucceeded {
+ // If compressionOperation != types.PreserveOriginal, we now have two reliable digest values:
+ // srcinfo.Digest describes the pre-compressionOperation input, verified by digestingReader
+ // uploadedInfo.Digest describes the post-compressionOperation output, computed by PutBlob
+ // (because inputInfo.Digest == "", this must have been computed afresh).
+ switch compressionOperation {
+ case types.PreserveOriginal:
+ break // Do nothing, we have only one digest and we might not have even verified it.
+ case types.Compress:
+ c.blobInfoCache.RecordDigestUncompressedPair(uploadedInfo.Digest, srcInfo.Digest)
+ case types.Decompress:
+ c.blobInfoCache.RecordDigestUncompressedPair(srcInfo.Digest, uploadedInfo.Digest)
+ default:
+ return types.BlobInfo{}, errors.Errorf("Internal error: Unexpected compressionOperation value %#v", compressionOperation)
+ }
+ }
return uploadedInfo, nil
}
diff --git a/vendor/github.com/containers/image/directory/directory_dest.go b/vendor/github.com/containers/image/directory/directory_dest.go
index d888931fe..d75c195b2 100644
--- a/vendor/github.com/containers/image/directory/directory_dest.go
+++ b/vendor/github.com/containers/image/directory/directory_dest.go
@@ -127,10 +127,11 @@ func (d *dirImageDestination) IgnoresEmbeddedDockerReference() bool {
// PutBlob writes contents of stream and returns data representing the result (with all data filled in).
// inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it.
// inputInfo.Size is the expected length of stream, if known.
+// May update cache.
// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
// to any other readers for download using the supplied digest.
// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
-func (d *dirImageDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, isConfig bool) (types.BlobInfo, error) {
+func (d *dirImageDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) {
blobFile, err := ioutil.TempFile(d.ref.path, "dir-put-blob")
if err != nil {
return types.BlobInfo{}, err
@@ -169,27 +170,27 @@ func (d *dirImageDestination) PutBlob(ctx context.Context, stream io.Reader, inp
return types.BlobInfo{Digest: computedDigest, Size: size}, nil
}
-// HasBlob returns true iff the image destination already contains a blob with the matching digest which can be reapplied using ReapplyBlob.
-// Unlike PutBlob, the digest can not be empty. If HasBlob returns true, the size of the blob must also be returned.
-// If the destination does not contain the blob, or it is unknown, HasBlob ordinarily returns (false, -1, nil);
-// it returns a non-nil error only on an unexpected failure.
-func (d *dirImageDestination) HasBlob(ctx context.Context, info types.BlobInfo) (bool, int64, error) {
+// TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
+// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
+// info.Digest must not be empty.
+// If canSubstitute, TryReusingBlob can use an equivalent of the desired blob; in that case the returned info may not match the input.
+// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size.
+// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
+// May use and/or update cache.
+func (d *dirImageDestination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) {
if info.Digest == "" {
- return false, -1, errors.Errorf(`"Can not check for a blob with unknown digest`)
+ return false, types.BlobInfo{}, errors.Errorf(`"Can not check for a blob with unknown digest`)
}
blobPath := d.ref.layerPath(info.Digest)
finfo, err := os.Stat(blobPath)
if err != nil && os.IsNotExist(err) {
- return false, -1, nil
+ return false, types.BlobInfo{}, nil
}
if err != nil {
- return false, -1, err
+ return false, types.BlobInfo{}, err
}
- return true, finfo.Size(), nil
-}
+ return true, types.BlobInfo{Digest: info.Digest, Size: finfo.Size()}, nil
-func (d *dirImageDestination) ReapplyBlob(ctx context.Context, info types.BlobInfo) (types.BlobInfo, error) {
- return info, nil
}
// PutManifest writes manifest to the destination.
diff --git a/vendor/github.com/containers/image/directory/directory_src.go b/vendor/github.com/containers/image/directory/directory_src.go
index 5e17c37c0..3625def80 100644
--- a/vendor/github.com/containers/image/directory/directory_src.go
+++ b/vendor/github.com/containers/image/directory/directory_src.go
@@ -49,7 +49,9 @@ func (s *dirImageSource) GetManifest(ctx context.Context, instanceDigest *digest
}
// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown).
-func (s *dirImageSource) GetBlob(ctx context.Context, info types.BlobInfo) (io.ReadCloser, int64, error) {
+// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
+// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location.
+func (s *dirImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) {
r, err := os.Open(s.ref.layerPath(info.Digest))
if err != nil {
return nil, -1, err
diff --git a/vendor/github.com/containers/image/docker/cache.go b/vendor/github.com/containers/image/docker/cache.go
new file mode 100644
index 000000000..64ad57a7c
--- /dev/null
+++ b/vendor/github.com/containers/image/docker/cache.go
@@ -0,0 +1,23 @@
+package docker
+
+import (
+ "github.com/containers/image/docker/reference"
+ "github.com/containers/image/types"
+)
+
+// bicTransportScope returns a BICTransportScope appropriate for ref.
+func bicTransportScope(ref dockerReference) types.BICTransportScope {
+ // Blobs can be reused across the whole registry.
+ return types.BICTransportScope{Opaque: reference.Domain(ref.ref)}
+}
+
+// newBICLocationReference returns a BICLocationReference appropriate for ref.
+func newBICLocationReference(ref dockerReference) types.BICLocationReference {
+ // Blobs are scoped to repositories (the tag/digest are not necessary to reuse a blob).
+ return types.BICLocationReference{Opaque: ref.ref.Name()}
+}
+
+// parseBICLocationReference returns a repository for encoded lr.
+func parseBICLocationReference(lr types.BICLocationReference) (reference.Named, error) {
+ return reference.ParseNormalizedNamed(lr.Opaque)
+}
diff --git a/vendor/github.com/containers/image/docker/docker_client.go b/vendor/github.com/containers/image/docker/docker_client.go
index ea1a8ec06..7f55dbe7f 100644
--- a/vendor/github.com/containers/image/docker/docker_client.go
+++ b/vendor/github.com/containers/image/docker/docker_client.go
@@ -70,10 +70,11 @@ type extensionSignatureList struct {
}
type bearerToken struct {
- Token string `json:"token"`
- AccessToken string `json:"access_token"`
- ExpiresIn int `json:"expires_in"`
- IssuedAt time.Time `json:"issued_at"`
+ Token string `json:"token"`
+ AccessToken string `json:"access_token"`
+ ExpiresIn int `json:"expires_in"`
+ IssuedAt time.Time `json:"issued_at"`
+ expirationTime time.Time
}
// dockerClient is configuration for dealing with a single Docker registry.
@@ -88,14 +89,14 @@ type dockerClient struct {
password string
signatureBase signatureStorageBase
scope authScope
+ extraScope *authScope // If non-nil, a temporary extra token scope (necessary for mounting from another repo)
// The following members are detected registry properties:
// They are set after a successful detectProperties(), and never change afterwards.
scheme string // Empty value also used to indicate detectProperties() has not yet succeeded.
challenges []challenge
supportsSignatures bool
- // The following members are private state for setupRequestAuth, both are valid if token != nil.
- token *bearerToken
- tokenExpiration time.Time
+ // Private state for setupRequestAuth
+ tokenCache map[string]bearerToken
}
type authScope struct {
@@ -131,6 +132,7 @@ func newBearerTokenFromJSONBlob(blob []byte) (*bearerToken, error) {
if token.IssuedAt.IsZero() {
token.IssuedAt = time.Now().UTC()
}
+ token.expirationTime = token.IssuedAt.Add(time.Duration(token.ExpiresIn) * time.Second)
return token, nil
}
@@ -260,6 +262,7 @@ func newDockerClient(sys *types.SystemContext, registry, reference string) (*doc
registry: registry,
client: &http.Client{Transport: tr},
insecureSkipTLSVerify: skipVerify,
+ tokenCache: map[string]bearerToken{},
}, nil
}
@@ -463,24 +466,23 @@ func (c *dockerClient) setupRequestAuth(req *http.Request) error {
req.SetBasicAuth(c.username, c.password)
return nil
case "bearer":
- if c.token == nil || time.Now().After(c.tokenExpiration) {
- realm, ok := challenge.Parameters["realm"]
- if !ok {
- return errors.Errorf("missing realm in bearer auth challenge")
- }
- service, _ := challenge.Parameters["service"] // Will be "" if not present
- var scope string
- if c.scope.remoteName != "" && c.scope.actions != "" {
- scope = fmt.Sprintf("repository:%s:%s", c.scope.remoteName, c.scope.actions)
- }
- token, err := c.getBearerToken(req.Context(), realm, service, scope)
+ cacheKey := ""
+ scopes := []authScope{c.scope}
+ if c.extraScope != nil {
+ // Using ':' as a separator here is unambiguous because getBearerToken below uses the same separator when formatting a remote request (and because repository names can't contain colons).
+ cacheKey = fmt.Sprintf("%s:%s", c.extraScope.remoteName, c.extraScope.actions)
+ scopes = append(scopes, *c.extraScope)
+ }
+ token, ok := c.tokenCache[cacheKey]
+ if !ok || time.Now().After(token.expirationTime) {
+ t, err := c.getBearerToken(req.Context(), challenge, scopes)
if err != nil {
return err
}
- c.token = token
- c.tokenExpiration = token.IssuedAt.Add(time.Duration(token.ExpiresIn) * time.Second)
+ token = *t
+ c.tokenCache[cacheKey] = token
}
- req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", c.token.Token))
+ req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", token.Token))
return nil
default:
logrus.Debugf("no handler for %s authentication", challenge.Scheme)
@@ -490,7 +492,12 @@ func (c *dockerClient) setupRequestAuth(req *http.Request) error {
return nil
}
-func (c *dockerClient) getBearerToken(ctx context.Context, realm, service, scope string) (*bearerToken, error) {
+func (c *dockerClient) getBearerToken(ctx context.Context, challenge challenge, scopes []authScope) (*bearerToken, error) {
+ realm, ok := challenge.Parameters["realm"]
+ if !ok {
+ return nil, errors.Errorf("missing realm in bearer auth challenge")
+ }
+
authReq, err := http.NewRequest("GET", realm, nil)
if err != nil {
return nil, err
@@ -500,11 +507,13 @@ func (c *dockerClient) getBearerToken(ctx context.Context, realm, service, scope
if c.username != "" {
getParams.Add("account", c.username)
}
- if service != "" {
+ if service, ok := challenge.Parameters["service"]; ok && service != "" {
getParams.Add("service", service)
}
- if scope != "" {
- getParams.Add("scope", scope)
+ for _, scope := range scopes {
+ if scope.remoteName != "" && scope.actions != "" {
+ getParams.Add("scope", fmt.Sprintf("repository:%s:%s", scope.remoteName, scope.actions))
+ }
}
authReq.URL.RawQuery = getParams.Encode()
if c.username != "" && c.password != "" {
diff --git a/vendor/github.com/containers/image/docker/docker_image_dest.go b/vendor/github.com/containers/image/docker/docker_image_dest.go
index 9bbffef93..2f471f648 100644
--- a/vendor/github.com/containers/image/docker/docker_image_dest.go
+++ b/vendor/github.com/containers/image/docker/docker_image_dest.go
@@ -15,6 +15,7 @@ import (
"github.com/containers/image/docker/reference"
"github.com/containers/image/manifest"
+ "github.com/containers/image/pkg/blobinfocache"
"github.com/containers/image/types"
"github.com/docker/distribution/registry/api/errcode"
"github.com/docker/distribution/registry/api/v2"
@@ -113,17 +114,21 @@ func (c *sizeCounter) Write(p []byte) (n int, err error) {
// PutBlob writes contents of stream and returns data representing the result (with all data filled in).
// inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it.
// inputInfo.Size is the expected length of stream, if known.
+// May update cache.
// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
// to any other readers for download using the supplied digest.
// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
-func (d *dockerImageDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, isConfig bool) (types.BlobInfo, error) {
+func (d *dockerImageDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) {
if inputInfo.Digest.String() != "" {
- haveBlob, size, err := d.HasBlob(ctx, inputInfo)
+ // This should not really be necessary, at least the copy code calls TryReusingBlob automatically.
+ // Still, we need to check, if only because the "initiate upload" endpoint does not have a documented "blob already exists" return value.
+ // But we do that with NoCache, so that it _only_ checks the primary destination, instead of trying all mount candidates _again_.
+ haveBlob, reusedInfo, err := d.TryReusingBlob(ctx, inputInfo, blobinfocache.NoCache, false)
if err != nil {
return types.BlobInfo{}, err
}
if haveBlob {
- return types.BlobInfo{Digest: inputInfo.Digest, Size: size}, nil
+ return reusedInfo, nil
}
}
@@ -160,7 +165,7 @@ func (d *dockerImageDestination) PutBlob(ctx context.Context, stream io.Reader,
return types.BlobInfo{}, errors.Wrap(err, "Error determining upload URL")
}
- // FIXME: DELETE uploadLocation on failure
+ // FIXME: DELETE uploadLocation on failure (does not really work in docker/distribution servers, which incorrectly require the "delete" action in the token's scope)
locationQuery := uploadLocation.Query()
// TODO: check inputInfo.Digest == computedDigest https://github.com/containers/image/pull/70#discussion_r77646717
@@ -177,19 +182,15 @@ func (d *dockerImageDestination) PutBlob(ctx context.Context, stream io.Reader,
}
logrus.Debugf("Upload of layer %s complete", computedDigest)
+ cache.RecordKnownLocation(d.ref.Transport(), bicTransportScope(d.ref), computedDigest, newBICLocationReference(d.ref))
return types.BlobInfo{Digest: computedDigest, Size: sizeCounter.size}, nil
}
-// HasBlob returns true iff the image destination already contains a blob with the matching digest which can be reapplied using ReapplyBlob.
-// Unlike PutBlob, the digest can not be empty. If HasBlob returns true, the size of the blob must also be returned.
-// If the destination does not contain the blob, or it is unknown, HasBlob ordinarily returns (false, -1, nil);
+// blobExists returns true iff repo contains a blob with digest, and if so, also its size.
+// If the destination does not contain the blob, or it is unknown, blobExists ordinarily returns (false, -1, nil);
// it returns a non-nil error only on an unexpected failure.
-func (d *dockerImageDestination) HasBlob(ctx context.Context, info types.BlobInfo) (bool, int64, error) {
- if info.Digest == "" {
- return false, -1, errors.Errorf(`"Can not check for a blob with unknown digest`)
- }
- checkPath := fmt.Sprintf(blobsPath, reference.Path(d.ref.ref), info.Digest.String())
-
+func (d *dockerImageDestination) blobExists(ctx context.Context, repo reference.Named, digest digest.Digest) (bool, int64, error) {
+ checkPath := fmt.Sprintf(blobsPath, reference.Path(repo), digest.String())
logrus.Debugf("Checking %s", checkPath)
res, err := d.c.makeRequest(ctx, "HEAD", checkPath, nil, nil, v2Auth)
if err != nil {
@@ -202,7 +203,7 @@ func (d *dockerImageDestination) HasBlob(ctx context.Context, info types.BlobInf
return true, getBlobSize(res), nil
case http.StatusUnauthorized:
logrus.Debugf("... not authorized")
- return false, -1, errors.Wrapf(client.HandleErrorResponse(res), "Error checking whether a blob %s exists in %s", info.Digest, d.ref.ref.Name())
+ return false, -1, errors.Wrapf(client.HandleErrorResponse(res), "Error checking whether a blob %s exists in %s", digest, repo.Name())
case http.StatusNotFound:
logrus.Debugf("... not present")
return false, -1, nil
@@ -211,8 +212,134 @@ func (d *dockerImageDestination) HasBlob(ctx context.Context, info types.BlobInf
}
}
-func (d *dockerImageDestination) ReapplyBlob(ctx context.Context, info types.BlobInfo) (types.BlobInfo, error) {
- return info, nil
+// mountBlob tries to mount blob srcDigest from srcRepo to the current destination.
+func (d *dockerImageDestination) mountBlob(ctx context.Context, srcRepo reference.Named, srcDigest digest.Digest) error {
+ u := url.URL{
+ Path: fmt.Sprintf(blobUploadPath, reference.Path(d.ref.ref)),
+ RawQuery: url.Values{
+ "mount": {srcDigest.String()},
+ "from": {reference.Path(srcRepo)},
+ }.Encode(),
+ }
+ mountPath := u.String()
+ logrus.Debugf("Trying to mount %s", mountPath)
+ res, err := d.c.makeRequest(ctx, "POST", mountPath, nil, nil, v2Auth)
+ if err != nil {
+ return err
+ }
+ defer res.Body.Close()
+ switch res.StatusCode {
+ case http.StatusCreated:
+ logrus.Debugf("... mount OK")
+ return nil
+ case http.StatusAccepted:
+ // Oops, the mount was ignored - either the registry does not support that yet, or the blob does not exist; the registry has started an ordinary upload process.
+	// Abort, and let the ultimate caller do an upload when it's ready, instead.
+ // NOTE: This does not really work in docker/distribution servers, which incorrectly require the "delete" action in the token's scope, and is thus entirely untested.
+ uploadLocation, err := res.Location()
+ if err != nil {
+ return errors.Wrap(err, "Error determining upload URL after a mount attempt")
+ }
+ logrus.Debugf("... started an upload instead of mounting, trying to cancel at %s", uploadLocation.String())
+ res2, err := d.c.makeRequestToResolvedURL(ctx, "DELETE", uploadLocation.String(), nil, nil, -1, v2Auth)
+ if err != nil {
+ logrus.Debugf("Error trying to cancel an inadvertent upload: %s", err)
+ } else {
+ defer res2.Body.Close()
+ if res2.StatusCode != http.StatusNoContent {
+ logrus.Debugf("Error trying to cancel an inadvertent upload, status %s", http.StatusText(res.StatusCode))
+ }
+ }
+ // Anyway, if canceling the upload fails, ignore it and return the more important error:
+ return fmt.Errorf("Mounting %s from %s to %s started an upload instead", srcDigest, srcRepo.Name(), d.ref.ref.Name())
+ default:
+ logrus.Debugf("Error mounting, response %#v", *res)
+ return errors.Wrapf(client.HandleErrorResponse(res), "Error mounting %s from %s to %s", srcDigest, srcRepo.Name(), d.ref.ref.Name())
+ }
+}
+
+// TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
+// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
+// info.Digest must not be empty.
+// If canSubstitute, TryReusingBlob can use an equivalent of the desired blob; in that case the returned info may not match the input.
+// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size.
+// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
+// May use and/or update cache.
+func (d *dockerImageDestination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) {
+ if info.Digest == "" {
+ return false, types.BlobInfo{}, errors.Errorf(`"Can not check for a blob with unknown digest`)
+ }
+
+ // First, check whether the blob happens to already exist at the destination.
+ exists, size, err := d.blobExists(ctx, d.ref.ref, info.Digest)
+ if err != nil {
+ return false, types.BlobInfo{}, err
+ }
+ if exists {
+ cache.RecordKnownLocation(d.ref.Transport(), bicTransportScope(d.ref), info.Digest, newBICLocationReference(d.ref))
+ return true, types.BlobInfo{Digest: info.Digest, Size: size}, nil
+ }
+
+ // Then try reusing blobs from other locations.
+
+ // Checking candidateRepo, and mounting from it, requires an expanded token scope.
+	// We still want to reuse the ping information and other aspects of the client, so rather than make a fresh copy, there is this slightly ugly extraScope hack.
+ if d.c.extraScope != nil {
+ return false, types.BlobInfo{}, errors.New("Internal error: dockerClient.extraScope was set before TryReusingBlob")
+ }
+ defer func() {
+ d.c.extraScope = nil
+ }()
+ for _, candidate := range cache.CandidateLocations(d.ref.Transport(), bicTransportScope(d.ref), info.Digest, canSubstitute) {
+ candidateRepo, err := parseBICLocationReference(candidate.Location)
+ if err != nil {
+ logrus.Debugf("Error parsing BlobInfoCache location reference: %s", err)
+ continue
+ }
+ logrus.Debugf("Trying to reuse cached location %s in %s", candidate.Digest.String(), candidateRepo.Name())
+
+ // Sanity checks:
+ if reference.Domain(candidateRepo) != reference.Domain(d.ref.ref) {
+ logrus.Debugf("... Internal error: domain %s does not match destination %s", reference.Domain(candidateRepo), reference.Domain(d.ref.ref))
+ continue
+ }
+ if candidateRepo.Name() == d.ref.ref.Name() && candidate.Digest == info.Digest {
+ logrus.Debug("... Already tried the primary destination")
+ continue
+ }
+
+ // Whatever happens here, don't abort the entire operation. It's likely we just don't have permissions, and if it is a critical network error, we will find out soon enough anyway.
+ d.c.extraScope = &authScope{
+ remoteName: reference.Path(candidateRepo),
+ actions: "pull",
+ }
+ // This existence check is not, strictly speaking, necessary: We only _really_ need it to get the blob size, and we could record that in the cache instead.
+ // But a "failed" d.mountBlob currently leaves around an unterminated server-side upload, which we would try to cancel.
+ // So, without this existence check, it would be 1 request on success, 2 requests on failure; with it, it is 2 requests on success, 1 request on failure.
+ // On success we avoid the actual costly upload; so, in a sense, the success case is "free", but failures are always costly.
+ // Even worse, docker/distribution does not actually reasonably implement canceling uploads
+ // (it would require a "delete" action in the token, and Quay does not give that to anyone, so we can't ask);
+	// so, be a nice client and don't create unnecessary upload sessions on the server.
+ exists, size, err := d.blobExists(ctx, candidateRepo, candidate.Digest)
+ if err != nil {
+ logrus.Debugf("... Failed: %v", err)
+ continue
+ }
+ if !exists {
+ // FIXME? Should we drop the blob from cache here (and elsewhere?)?
+ continue // logrus.Debug() already happened in blobExists
+ }
+ if candidateRepo.Name() != d.ref.ref.Name() {
+ if err := d.mountBlob(ctx, candidateRepo, candidate.Digest); err != nil {
+ logrus.Debugf("... Mount failed: %v", err)
+ continue
+ }
+ }
+ cache.RecordKnownLocation(d.ref.Transport(), bicTransportScope(d.ref), candidate.Digest, newBICLocationReference(d.ref))
+ return true, types.BlobInfo{Digest: candidate.Digest, Size: size}, nil
+ }
+
+ return false, types.BlobInfo{}, nil
}
// PutManifest writes manifest to the destination.
diff --git a/vendor/github.com/containers/image/docker/docker_image_src.go b/vendor/github.com/containers/image/docker/docker_image_src.go
index aedab9731..fbed6297f 100644
--- a/vendor/github.com/containers/image/docker/docker_image_src.go
+++ b/vendor/github.com/containers/image/docker/docker_image_src.go
@@ -162,7 +162,9 @@ func getBlobSize(resp *http.Response) int64 {
}
// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown).
-func (s *dockerImageSource) GetBlob(ctx context.Context, info types.BlobInfo) (io.ReadCloser, int64, error) {
+// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
+// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location.
+func (s *dockerImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) {
if len(info.URLs) != 0 {
return s.getExternalBlob(ctx, info.URLs)
}
@@ -177,6 +179,7 @@ func (s *dockerImageSource) GetBlob(ctx context.Context, info types.BlobInfo) (i
// print url also
return nil, 0, errors.Errorf("Invalid status code returned when fetching blob %d (%s)", res.StatusCode, http.StatusText(res.StatusCode))
}
+ cache.RecordKnownLocation(s.ref.Transport(), bicTransportScope(s.ref), info.Digest, newBICLocationReference(s.ref))
return res.Body, getBlobSize(res), nil
}
diff --git a/vendor/github.com/containers/image/docker/tarfile/dest.go b/vendor/github.com/containers/image/docker/tarfile/dest.go
index d6510ccf1..ad8a48a03 100644
--- a/vendor/github.com/containers/image/docker/tarfile/dest.go
+++ b/vendor/github.com/containers/image/docker/tarfile/dest.go
@@ -85,10 +85,11 @@ func (d *Destination) IgnoresEmbeddedDockerReference() bool {
// PutBlob writes contents of stream and returns data representing the result (with all data filled in).
// inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it.
// inputInfo.Size is the expected length of stream, if known.
+// May update cache.
// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
// to any other readers for download using the supplied digest.
// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
-func (d *Destination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, isConfig bool) (types.BlobInfo, error) {
+func (d *Destination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) {
// Ouch, we need to stream the blob into a temporary file just to determine the size.
// When the layer is decompressed, we also have to generate the digest on uncompressed datas.
if inputInfo.Size == -1 || inputInfo.Digest.String() == "" {
@@ -120,12 +121,12 @@ func (d *Destination) PutBlob(ctx context.Context, stream io.Reader, inputInfo t
}
// Maybe the blob has been already sent
- ok, size, err := d.HasBlob(ctx, inputInfo)
+ ok, reusedInfo, err := d.TryReusingBlob(ctx, inputInfo, cache, false)
if err != nil {
return types.BlobInfo{}, err
}
if ok {
- return types.BlobInfo{Digest: inputInfo.Digest, Size: size}, nil
+ return reusedInfo, nil
}
if isConfig {
@@ -151,29 +152,21 @@ func (d *Destination) PutBlob(ctx context.Context, stream io.Reader, inputInfo t
return types.BlobInfo{Digest: inputInfo.Digest, Size: inputInfo.Size}, nil
}
-// HasBlob returns true iff the image destination already contains a blob with
-// the matching digest which can be reapplied using ReapplyBlob. Unlike
-// PutBlob, the digest can not be empty. If HasBlob returns true, the size of
-// the blob must also be returned. If the destination does not contain the
-// blob, or it is unknown, HasBlob ordinarily returns (false, -1, nil); it
-// returns a non-nil error only on an unexpected failure.
-func (d *Destination) HasBlob(ctx context.Context, info types.BlobInfo) (bool, int64, error) {
+// TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
+// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
+// info.Digest must not be empty.
+// If canSubstitute, TryReusingBlob can use an equivalent of the desired blob; in that case the returned info may not match the input.
+// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size.
+// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
+// May use and/or update cache.
+func (d *Destination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) {
if info.Digest == "" {
- return false, -1, errors.Errorf("Can not check for a blob with unknown digest")
+ return false, types.BlobInfo{}, errors.Errorf("Can not check for a blob with unknown digest")
}
if blob, ok := d.blobs[info.Digest]; ok {
- return true, blob.Size, nil
+ return true, types.BlobInfo{Digest: info.Digest, Size: blob.Size}, nil
}
- return false, -1, nil
-}
-
-// ReapplyBlob informs the image destination that a blob for which HasBlob
-// previously returned true would have been passed to PutBlob if it had
-// returned false. Like HasBlob and unlike PutBlob, the digest can not be
-// empty. If the blob is a filesystem layer, this signifies that the changes
-// it describes need to be applied again when composing a filesystem tree.
-func (d *Destination) ReapplyBlob(ctx context.Context, info types.BlobInfo) (types.BlobInfo, error) {
- return info, nil
+ return false, types.BlobInfo{}, nil
}
func (d *Destination) createRepositoriesFile(rootLayerID string) error {
diff --git a/vendor/github.com/containers/image/docker/tarfile/src.go b/vendor/github.com/containers/image/docker/tarfile/src.go
index 942893a81..d94ed9783 100644
--- a/vendor/github.com/containers/image/docker/tarfile/src.go
+++ b/vendor/github.com/containers/image/docker/tarfile/src.go
@@ -398,7 +398,9 @@ func (r uncompressedReadCloser) Close() error {
}
// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown).
-func (s *Source) GetBlob(ctx context.Context, info types.BlobInfo) (io.ReadCloser, int64, error) {
+// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
+// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location.
+func (s *Source) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) {
if err := s.ensureCachedDataIsPresent(); err != nil {
return nil, 0, err
}
diff --git a/vendor/github.com/containers/image/image/docker_schema2.go b/vendor/github.com/containers/image/image/docker_schema2.go
index b639ab714..cee60f824 100644
--- a/vendor/github.com/containers/image/image/docker_schema2.go
+++ b/vendor/github.com/containers/image/image/docker_schema2.go
@@ -11,6 +11,7 @@ import (
"github.com/containers/image/docker/reference"
"github.com/containers/image/manifest"
+ "github.com/containers/image/pkg/blobinfocache"
"github.com/containers/image/types"
"github.com/opencontainers/go-digest"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
@@ -95,7 +96,7 @@ func (m *manifestSchema2) ConfigBlob(ctx context.Context) ([]byte, error) {
if m.src == nil {
return nil, errors.Errorf("Internal error: neither src nor configBlob set in manifestSchema2")
}
- stream, _, err := m.src.GetBlob(ctx, manifest.BlobInfoFromSchema2Descriptor(m.m.ConfigDescriptor))
+ stream, _, err := m.src.GetBlob(ctx, manifest.BlobInfoFromSchema2Descriptor(m.m.ConfigDescriptor), blobinfocache.NoCache)
if err != nil {
return nil, err
}
@@ -249,7 +250,9 @@ func (m *manifestSchema2) convertToManifestSchema1(ctx context.Context, dest typ
if historyEntry.EmptyLayer {
if !haveGzippedEmptyLayer {
logrus.Debugf("Uploading empty layer during conversion to schema 1")
- info, err := dest.PutBlob(ctx, bytes.NewReader(GzippedEmptyLayer), types.BlobInfo{Digest: GzippedEmptyLayerDigest, Size: int64(len(GzippedEmptyLayer))}, false)
+ // Ideally we should update the relevant BlobInfoCache about this layer, but that would require passing it down here,
+ // and anyway this blob is so small that it’s easier to just copy it than to worry about figuring out another location where to get it.
+ info, err := dest.PutBlob(ctx, bytes.NewReader(GzippedEmptyLayer), types.BlobInfo{Digest: GzippedEmptyLayerDigest, Size: int64(len(GzippedEmptyLayer))}, blobinfocache.NoCache, false)
if err != nil {
return nil, errors.Wrap(err, "Error uploading empty layer")
}
diff --git a/vendor/github.com/containers/image/image/oci.go b/vendor/github.com/containers/image/image/oci.go
index 298db360d..6fe2a9a32 100644
--- a/vendor/github.com/containers/image/image/oci.go
+++ b/vendor/github.com/containers/image/image/oci.go
@@ -7,6 +7,7 @@ import (
"github.com/containers/image/docker/reference"
"github.com/containers/image/manifest"
+ "github.com/containers/image/pkg/blobinfocache"
"github.com/containers/image/types"
"github.com/opencontainers/go-digest"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
@@ -60,7 +61,7 @@ func (m *manifestOCI1) ConfigBlob(ctx context.Context) ([]byte, error) {
if m.src == nil {
return nil, errors.Errorf("Internal error: neither src nor configBlob set in manifestOCI1")
}
- stream, _, err := m.src.GetBlob(ctx, manifest.BlobInfoFromOCI1Descriptor(m.m.Config))
+ stream, _, err := m.src.GetBlob(ctx, manifest.BlobInfoFromOCI1Descriptor(m.m.Config), blobinfocache.NoCache)
if err != nil {
return nil, err
}
diff --git a/vendor/github.com/containers/image/oci/archive/oci_dest.go b/vendor/github.com/containers/image/oci/archive/oci_dest.go
index 3c6b7dffa..3997ac2ee 100644
--- a/vendor/github.com/containers/image/oci/archive/oci_dest.go
+++ b/vendor/github.com/containers/image/oci/archive/oci_dest.go
@@ -77,20 +77,27 @@ func (d *ociArchiveImageDestination) IgnoresEmbeddedDockerReference() bool {
return d.unpackedDest.IgnoresEmbeddedDockerReference()
}
-// PutBlob writes contents of stream and returns data representing the result (with all data filled in).
+// PutBlob writes contents of stream and returns data representing the result.
// inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it.
// inputInfo.Size is the expected length of stream, if known.
-func (d *ociArchiveImageDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, isConfig bool) (types.BlobInfo, error) {
- return d.unpackedDest.PutBlob(ctx, stream, inputInfo, isConfig)
-}
-
-// HasBlob returns true iff the image destination already contains a blob with the matching digest which can be reapplied using ReapplyBlob
-func (d *ociArchiveImageDestination) HasBlob(ctx context.Context, info types.BlobInfo) (bool, int64, error) {
- return d.unpackedDest.HasBlob(ctx, info)
-}
-
-func (d *ociArchiveImageDestination) ReapplyBlob(ctx context.Context, info types.BlobInfo) (types.BlobInfo, error) {
- return d.unpackedDest.ReapplyBlob(ctx, info)
+// inputInfo.MediaType describes the blob format, if known.
+// May update cache.
+// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
+// to any other readers for download using the supplied digest.
+// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
+func (d *ociArchiveImageDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) {
+ return d.unpackedDest.PutBlob(ctx, stream, inputInfo, cache, isConfig)
+}
+
+// TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
+// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
+// info.Digest must not be empty.
+// If canSubstitute, TryReusingBlob can use an equivalent of the desired blob; in that case the returned info may not match the input.
+// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size.
+// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
+// May use and/or update cache.
+func (d *ociArchiveImageDestination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) {
+ return d.unpackedDest.TryReusingBlob(ctx, info, cache, canSubstitute)
}
// PutManifest writes manifest to the destination
diff --git a/vendor/github.com/containers/image/oci/archive/oci_src.go b/vendor/github.com/containers/image/oci/archive/oci_src.go
index d04773c1f..084d818f7 100644
--- a/vendor/github.com/containers/image/oci/archive/oci_src.go
+++ b/vendor/github.com/containers/image/oci/archive/oci_src.go
@@ -76,9 +76,11 @@ func (s *ociArchiveImageSource) GetManifest(ctx context.Context, instanceDigest
return s.unpackedSrc.GetManifest(ctx, instanceDigest)
}
-// GetBlob returns a stream for the specified blob, and the blob's size.
-func (s *ociArchiveImageSource) GetBlob(ctx context.Context, info types.BlobInfo) (io.ReadCloser, int64, error) {
- return s.unpackedSrc.GetBlob(ctx, info)
+// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown).
+// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
+// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location.
+func (s *ociArchiveImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) {
+ return s.unpackedSrc.GetBlob(ctx, info, cache)
}
// GetSignatures returns the image's signatures. It may use a remote (= slow) service.
diff --git a/vendor/github.com/containers/image/oci/layout/oci_dest.go b/vendor/github.com/containers/image/oci/layout/oci_dest.go
index 351632750..b5a6e08f8 100644
--- a/vendor/github.com/containers/image/oci/layout/oci_dest.go
+++ b/vendor/github.com/containers/image/oci/layout/oci_dest.go
@@ -107,13 +107,15 @@ func (d *ociImageDestination) IgnoresEmbeddedDockerReference() bool {
return false // N/A, DockerReference() returns nil.
}
-// PutBlob writes contents of stream and returns data representing the result (with all data filled in).
+// PutBlob writes contents of stream and returns data representing the result.
// inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it.
// inputInfo.Size is the expected length of stream, if known.
+// inputInfo.MediaType describes the blob format, if known.
+// May update cache.
// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
// to any other readers for download using the supplied digest.
// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
-func (d *ociImageDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, isConfig bool) (types.BlobInfo, error) {
+func (d *ociImageDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) {
blobFile, err := ioutil.TempFile(d.ref.dir, "oci-put-blob")
if err != nil {
return types.BlobInfo{}, err
@@ -173,30 +175,29 @@ func (d *ociImageDestination) PutBlob(ctx context.Context, stream io.Reader, inp
return types.BlobInfo{Digest: computedDigest, Size: size}, nil
}
-// HasBlob returns true iff the image destination already contains a blob with the matching digest which can be reapplied using ReapplyBlob.
-// Unlike PutBlob, the digest can not be empty. If HasBlob returns true, the size of the blob must also be returned.
-// If the destination does not contain the blob, or it is unknown, HasBlob ordinarily returns (false, -1, nil);
-// it returns a non-nil error only on an unexpected failure.
-func (d *ociImageDestination) HasBlob(ctx context.Context, info types.BlobInfo) (bool, int64, error) {
+// TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
+// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
+// info.Digest must not be empty.
+// If canSubstitute, TryReusingBlob can use an equivalent of the desired blob; in that case the returned info may not match the input.
+// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size.
+// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
+// May use and/or update cache.
+func (d *ociImageDestination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) {
if info.Digest == "" {
- return false, -1, errors.Errorf(`"Can not check for a blob with unknown digest`)
+ return false, types.BlobInfo{}, errors.Errorf(`"Can not check for a blob with unknown digest`)
}
blobPath, err := d.ref.blobPath(info.Digest, d.sharedBlobDir)
if err != nil {
- return false, -1, err
+ return false, types.BlobInfo{}, err
}
finfo, err := os.Stat(blobPath)
if err != nil && os.IsNotExist(err) {
- return false, -1, nil
+ return false, types.BlobInfo{}, nil
}
if err != nil {
- return false, -1, err
+ return false, types.BlobInfo{}, err
}
- return true, finfo.Size(), nil
-}
-
-func (d *ociImageDestination) ReapplyBlob(ctx context.Context, info types.BlobInfo) (types.BlobInfo, error) {
- return info, nil
+ return true, types.BlobInfo{Digest: info.Digest, Size: finfo.Size()}, nil
}
// PutManifest writes manifest to the destination.
diff --git a/vendor/github.com/containers/image/oci/layout/oci_src.go b/vendor/github.com/containers/image/oci/layout/oci_src.go
index 33115c00d..086a7040d 100644
--- a/vendor/github.com/containers/image/oci/layout/oci_src.go
+++ b/vendor/github.com/containers/image/oci/layout/oci_src.go
@@ -92,8 +92,10 @@ func (s *ociImageSource) GetManifest(ctx context.Context, instanceDigest *digest
return m, mimeType, nil
}
-// GetBlob returns a stream for the specified blob, and the blob's size.
-func (s *ociImageSource) GetBlob(ctx context.Context, info types.BlobInfo) (io.ReadCloser, int64, error) {
+// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown).
+// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
+// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location.
+func (s *ociImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) {
if len(info.URLs) != 0 {
return s.getExternalBlob(ctx, info.URLs)
}
diff --git a/vendor/github.com/containers/image/openshift/openshift.go b/vendor/github.com/containers/image/openshift/openshift.go
index dbd04f10b..0cce1e6c7 100644
--- a/vendor/github.com/containers/image/openshift/openshift.go
+++ b/vendor/github.com/containers/image/openshift/openshift.go
@@ -212,11 +212,13 @@ func (s *openshiftImageSource) GetManifest(ctx context.Context, instanceDigest *
}
// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown).
-func (s *openshiftImageSource) GetBlob(ctx context.Context, info types.BlobInfo) (io.ReadCloser, int64, error) {
+// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
+// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location.
+func (s *openshiftImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) {
if err := s.ensureImageIsResolved(ctx); err != nil {
return nil, 0, err
}
- return s.docker.GetBlob(ctx, info)
+ return s.docker.GetBlob(ctx, info, cache)
}
// GetSignatures returns the image's signatures. It may use a remote (= slow) service.
@@ -379,23 +381,23 @@ func (d *openshiftImageDestination) IgnoresEmbeddedDockerReference() bool {
// PutBlob writes contents of stream and returns data representing the result (with all data filled in).
// inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it.
// inputInfo.Size is the expected length of stream, if known.
+// May update cache.
// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
// to any other readers for download using the supplied digest.
// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
-func (d *openshiftImageDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, isConfig bool) (types.BlobInfo, error) {
- return d.docker.PutBlob(ctx, stream, inputInfo, isConfig)
-}
-
-// HasBlob returns true iff the image destination already contains a blob with the matching digest which can be reapplied using ReapplyBlob.
-// Unlike PutBlob, the digest can not be empty. If HasBlob returns true, the size of the blob must also be returned.
-// If the destination does not contain the blob, or it is unknown, HasBlob ordinarily returns (false, -1, nil);
-// it returns a non-nil error only on an unexpected failure.
-func (d *openshiftImageDestination) HasBlob(ctx context.Context, info types.BlobInfo) (bool, int64, error) {
- return d.docker.HasBlob(ctx, info)
-}
-
-func (d *openshiftImageDestination) ReapplyBlob(ctx context.Context, info types.BlobInfo) (types.BlobInfo, error) {
- return d.docker.ReapplyBlob(ctx, info)
+func (d *openshiftImageDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) {
+ return d.docker.PutBlob(ctx, stream, inputInfo, cache, isConfig)
+}
+
+// TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
+// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
+// info.Digest must not be empty.
+// If canSubstitute, TryReusingBlob can use an equivalent of the desired blob; in that case the returned info may not match the input.
+// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size.
+// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
+// May use and/or update cache.
+func (d *openshiftImageDestination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) {
+ return d.docker.TryReusingBlob(ctx, info, cache, canSubstitute)
}
// PutManifest writes manifest to the destination.
diff --git a/vendor/github.com/containers/image/ostree/ostree_dest.go b/vendor/github.com/containers/image/ostree/ostree_dest.go
index afff7dc1b..064898948 100644
--- a/vendor/github.com/containers/image/ostree/ostree_dest.go
+++ b/vendor/github.com/containers/image/ostree/ostree_dest.go
@@ -132,7 +132,15 @@ func (d *ostreeImageDestination) IgnoresEmbeddedDockerReference() bool {
return false // N/A, DockerReference() returns nil.
}
-func (d *ostreeImageDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, isConfig bool) (types.BlobInfo, error) {
+// PutBlob writes contents of stream and returns data representing the result.
+// inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it.
+// inputInfo.Size is the expected length of stream, if known.
+// inputInfo.MediaType describes the blob format, if known.
+// May update cache.
+// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
+// to any other readers for download using the supplied digest.
+// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
+func (d *ostreeImageDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) {
tmpDir, err := ioutil.TempDir(d.tmpDirPath, "blob")
if err != nil {
return types.BlobInfo{}, err
@@ -322,12 +330,18 @@ func (d *ostreeImageDestination) importConfig(repo *otbuiltin.Repo, blob *blobTo
return d.ostreeCommit(repo, ostreeBranch, destinationPath, []string{fmt.Sprintf("docker.size=%d", blob.Size)})
}
-func (d *ostreeImageDestination) HasBlob(ctx context.Context, info types.BlobInfo) (bool, int64, error) {
-
+// TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
+// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
+// info.Digest must not be empty.
+// If canSubstitute, TryReusingBlob can use an equivalent of the desired blob; in that case the returned info may not match the input.
+// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size.
+// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
+// May use and/or update cache.
+func (d *ostreeImageDestination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) {
if d.repo == nil {
repo, err := openRepo(d.ref.repo)
if err != nil {
- return false, 0, err
+ return false, types.BlobInfo{}, err
}
d.repo = repo
}
@@ -335,29 +349,25 @@ func (d *ostreeImageDestination) HasBlob(ctx context.Context, info types.BlobInf
found, data, err := readMetadata(d.repo, branch, "docker.uncompressed_digest")
if err != nil || !found {
- return found, -1, err
+ return found, types.BlobInfo{}, err
}
found, data, err = readMetadata(d.repo, branch, "docker.uncompressed_size")
if err != nil || !found {
- return found, -1, err
+ return found, types.BlobInfo{}, err
}
found, data, err = readMetadata(d.repo, branch, "docker.size")
if err != nil || !found {
- return found, -1, err
+ return found, types.BlobInfo{}, err
}
size, err := strconv.ParseInt(data, 10, 64)
if err != nil {
- return false, -1, err
+ return false, types.BlobInfo{}, err
}
- return true, size, nil
-}
-
-func (d *ostreeImageDestination) ReapplyBlob(ctx context.Context, info types.BlobInfo) (types.BlobInfo, error) {
- return info, nil
+ return true, types.BlobInfo{Digest: info.Digest, Size: size}, nil
}
// PutManifest writes manifest to the destination.
diff --git a/vendor/github.com/containers/image/ostree/ostree_src.go b/vendor/github.com/containers/image/ostree/ostree_src.go
index 1f325b2a7..e73cae198 100644
--- a/vendor/github.com/containers/image/ostree/ostree_src.go
+++ b/vendor/github.com/containers/image/ostree/ostree_src.go
@@ -255,8 +255,10 @@ func (s *ostreeImageSource) readSingleFile(commit, path string) (io.ReadCloser,
return getter.Get(path)
}
-// GetBlob returns a stream for the specified blob, and the blob's size.
-func (s *ostreeImageSource) GetBlob(ctx context.Context, info types.BlobInfo) (io.ReadCloser, int64, error) {
+// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown).
+// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
+// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location.
+func (s *ostreeImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) {
blob := info.Digest.Hex()
diff --git a/vendor/github.com/containers/image/pkg/blobinfocache/boltdb.go b/vendor/github.com/containers/image/pkg/blobinfocache/boltdb.go
new file mode 100644
index 000000000..4ee809134
--- /dev/null
+++ b/vendor/github.com/containers/image/pkg/blobinfocache/boltdb.go
@@ -0,0 +1,329 @@
+package blobinfocache
+
+import (
+ "fmt"
+ "os"
+ "sync"
+ "time"
+
+ "github.com/boltdb/bolt"
+ "github.com/containers/image/types"
+ "github.com/opencontainers/go-digest"
+ "github.com/sirupsen/logrus"
+)
+
+var (
+ // NOTE: There is no versioning data inside the file; this is a “cache”, so on an incompatible format upgrade
+ // we can simply start over with a different filename; update blobInfoCacheFilename.
+
+ // FIXME: For CRI-O, does this need to hide information between different users?
+
+ // uncompressedDigestBucket stores a mapping from any digest to an uncompressed digest.
+ uncompressedDigestBucket = []byte("uncompressedDigest")
+ // digestByUncompressedBucket stores a bucket per uncompressed digest, with the bucket containing a set of digests for that uncompressed digest
+ // (as a set of key=digest, value="" pairs)
+ digestByUncompressedBucket = []byte("digestByUncompressed")
+ // knownLocationsBucket stores a nested structure of buckets, keyed by (transport name, scope string, blob digest), ultimately containing
+ // a bucket of (opaque location reference, BinaryMarshaller-encoded time.Time value).
+ knownLocationsBucket = []byte("knownLocations")
+)
+
+// Concurrency:
+// See https://www.sqlite.org/src/artifact/c230a7a24?ln=994-1081 for all the issues with locks, which make it extremely
+// difficult to use a single BoltDB file from multiple threads/goroutines inside a process. So, we punt and only allow one at a time.
+
+// pathLock contains a lock for a specific BoltDB database path.
+type pathLock struct {
+ refCount int64 // Number of threads/goroutines owning or waiting on this lock. Protected by global pathLocksMutex, NOT by the mutex field below!
+ mutex sync.Mutex // Owned by the thread/goroutine allowed to access the BoltDB database.
+}
+
+var (
+ // pathLocks contains a lock for each currently open file.
+ // This must be global so that independently created instances of boltDBCache exclude each other.
+ // The map is protected by pathLocksMutex.
+ // FIXME? Should this be based on device:inode numbers instead of paths instead?
+ pathLocks = map[string]*pathLock{}
+ pathLocksMutex = sync.Mutex{}
+)
+
+// lockPath obtains the pathLock for path.
+// The caller must call unlockPath eventually.
+func lockPath(path string) {
+ pl := func() *pathLock { // A scope for defer
+ pathLocksMutex.Lock()
+ defer pathLocksMutex.Unlock()
+ pl, ok := pathLocks[path]
+ if ok {
+ pl.refCount++
+ } else {
+ pl = &pathLock{refCount: 1, mutex: sync.Mutex{}}
+ pathLocks[path] = pl
+ }
+ return pl
+ }()
+ pl.mutex.Lock()
+}
+
+// unlockPath releases the pathLock for path.
+func unlockPath(path string) {
+ pathLocksMutex.Lock()
+ defer pathLocksMutex.Unlock()
+ pl, ok := pathLocks[path]
+ if !ok {
+ // Should this return an error instead? BlobInfoCache ultimately ignores errors…
+ panic(fmt.Sprintf("Internal error: unlocking nonexistent lock for path %s", path))
+ }
+ pl.mutex.Unlock()
+ pl.refCount--
+ if pl.refCount == 0 {
+ delete(pathLocks, path)
+ }
+}
+
+// boltDBCache is a BlobInfoCache implementation which uses a BoltDB file at the specified path.
+//
+// Note that we don’t keep the database open across operations, because that would lock the file and block any other
+// users; instead, we need to open/close it for every single write or lookup.
+type boltDBCache struct {
+ path string
+}
+
+// NewBoltDBCache returns a BlobInfoCache implementation which uses a BoltDB file at path.
+// Most users should call DefaultCache instead.
+func NewBoltDBCache(path string) types.BlobInfoCache {
+ return &boltDBCache{path: path}
+}
+
+// view runs the specified fn within a read-only transaction on the database.
+func (bdc *boltDBCache) view(fn func(tx *bolt.Tx) error) (retErr error) {
+ // bolt.Open(bdc.path, 0600, &bolt.Options{ReadOnly: true}) will, if the file does not exist,
+ // nevertheless create it, but with an O_RDONLY file descriptor, try to initialize it, and fail — while holding
+ // a read lock, blocking any future writes.
+ // Hence this preliminary check, which is RACY: Another process could remove the file
+ // between the Lstat call and opening the database.
+ if _, err := os.Lstat(bdc.path); err != nil && os.IsNotExist(err) {
+ return err
+ }
+
+ lockPath(bdc.path)
+ defer unlockPath(bdc.path)
+ db, err := bolt.Open(bdc.path, 0600, &bolt.Options{ReadOnly: true})
+ if err != nil {
+ return err
+ }
+ defer func() {
+ if err := db.Close(); retErr == nil && err != nil {
+ retErr = err
+ }
+ }()
+
+ return db.View(fn)
+}
+
+// update runs the specified fn within a read-write transaction on the database.
+func (bdc *boltDBCache) update(fn func(tx *bolt.Tx) error) (retErr error) {
+ lockPath(bdc.path)
+ defer unlockPath(bdc.path)
+ db, err := bolt.Open(bdc.path, 0600, nil)
+ if err != nil {
+ return err
+ }
+ defer func() {
+ if err := db.Close(); retErr == nil && err != nil {
+ retErr = err
+ }
+ }()
+
+ return db.Update(fn)
+}
+
+// uncompressedDigest implements BlobInfoCache.UncompressedDigest within the provided read-only transaction.
+func (bdc *boltDBCache) uncompressedDigest(tx *bolt.Tx, anyDigest digest.Digest) digest.Digest {
+ if b := tx.Bucket(uncompressedDigestBucket); b != nil {
+ if uncompressedBytes := b.Get([]byte(anyDigest.String())); uncompressedBytes != nil {
+ d, err := digest.Parse(string(uncompressedBytes))
+ if err == nil {
+ return d
+ }
+ // FIXME? Log err (but throttle the log volume on repeated accesses)?
+ }
+ }
+ // Presence in digestsByUncompressedBucket implies that anyDigest must already refer to an uncompressed digest.
+ // This way we don't have to waste storage space with trivial (uncompressed, uncompressed) mappings
+ // when we already record a (compressed, uncompressed) pair.
+ if b := tx.Bucket(digestByUncompressedBucket); b != nil {
+ if b = b.Bucket([]byte(anyDigest.String())); b != nil {
+ c := b.Cursor()
+ if k, _ := c.First(); k != nil { // The bucket is non-empty
+ return anyDigest
+ }
+ }
+ }
+ return ""
+}
+
+// UncompressedDigest returns an uncompressed digest corresponding to anyDigest.
+// May return anyDigest if it is known to be uncompressed.
+// Returns "" if nothing is known about the digest (it may be compressed or uncompressed).
+func (bdc *boltDBCache) UncompressedDigest(anyDigest digest.Digest) digest.Digest {
+ var res digest.Digest
+ if err := bdc.view(func(tx *bolt.Tx) error {
+ res = bdc.uncompressedDigest(tx, anyDigest)
+ return nil
+ }); err != nil { // Including os.IsNotExist(err)
+ return "" // FIXME? Log err (but throttle the log volume on repeated accesses)?
+ }
+ return res
+}
+
+// RecordDigestUncompressedPair records that the uncompressed version of anyDigest is uncompressed.
+// It’s allowed for anyDigest == uncompressed.
+// WARNING: Only call this for LOCALLY VERIFIED data; don’t record a digest pair just because some remote author claims so (e.g.
+// because a manifest/config pair exists); otherwise the cache could be poisoned and allow substituting unexpected blobs.
+// (Eventually, the DiffIDs in image config could detect the substitution, but that may be too late, and not all image formats contain that data.)
+func (bdc *boltDBCache) RecordDigestUncompressedPair(anyDigest digest.Digest, uncompressed digest.Digest) {
+ _ = bdc.update(func(tx *bolt.Tx) error {
+ b, err := tx.CreateBucketIfNotExists(uncompressedDigestBucket)
+ if err != nil {
+ return err
+ }
+ key := []byte(anyDigest.String())
+ if previousBytes := b.Get(key); previousBytes != nil {
+ previous, err := digest.Parse(string(previousBytes))
+ if err != nil {
+ return err
+ }
+ if previous != uncompressed {
+ logrus.Warnf("Uncompressed digest for blob %s previously recorded as %s, now %s", anyDigest, previous, uncompressed)
+ }
+ }
+ if err := b.Put(key, []byte(uncompressed.String())); err != nil {
+ return err
+ }
+
+ b, err = tx.CreateBucketIfNotExists(digestByUncompressedBucket)
+ if err != nil {
+ return err
+ }
+ b, err = b.CreateBucketIfNotExists([]byte(uncompressed.String()))
+ if err != nil {
+ return err
+ }
+ if err := b.Put([]byte(anyDigest.String()), []byte{}); err != nil { // Possibly writing the same []byte{} presence marker again.
+ return err
+ }
+ return nil
+ }) // FIXME? Log error (but throttle the log volume on repeated accesses)?
+}
+
+// RecordKnownLocation records that a blob with the specified digest exists within the specified (transport, scope) scope,
+// and can be reused given the opaque location data.
+func (bdc *boltDBCache) RecordKnownLocation(transport types.ImageTransport, scope types.BICTransportScope, blobDigest digest.Digest, location types.BICLocationReference) {
+ _ = bdc.update(func(tx *bolt.Tx) error {
+ b, err := tx.CreateBucketIfNotExists(knownLocationsBucket)
+ if err != nil {
+ return err
+ }
+ b, err = b.CreateBucketIfNotExists([]byte(transport.Name()))
+ if err != nil {
+ return err
+ }
+ b, err = b.CreateBucketIfNotExists([]byte(scope.Opaque))
+ if err != nil {
+ return err
+ }
+ b, err = b.CreateBucketIfNotExists([]byte(blobDigest.String()))
+ if err != nil {
+ return err
+ }
+ value, err := time.Now().MarshalBinary()
+ if err != nil {
+ return err
+ }
+ if err := b.Put([]byte(location.Opaque), value); err != nil { // Possibly overwriting an older entry.
+ return err
+ }
+ return nil
+ }) // FIXME? Log error (but throttle the log volume on repeated accesses)?
+}
+
+// appendReplacementCandidates creates candidateWithTime values for digest in scopeBucket, and returns the result of appending them to candidates.
+func (bdc *boltDBCache) appendReplacementCandidates(candidates []candidateWithTime, scopeBucket *bolt.Bucket, digest digest.Digest) []candidateWithTime {
+ b := scopeBucket.Bucket([]byte(digest.String()))
+ if b == nil {
+ return candidates
+ }
+ _ = b.ForEach(func(k, v []byte) error {
+ t := time.Time{}
+ if err := t.UnmarshalBinary(v); err != nil {
+ return err
+ }
+ candidates = append(candidates, candidateWithTime{
+ candidate: types.BICReplacementCandidate{
+ Digest: digest,
+ Location: types.BICLocationReference{Opaque: string(k)},
+ },
+ lastSeen: t,
+ })
+ return nil
+ }) // FIXME? Log error (but throttle the log volume on repeated accesses)?
+ return candidates
+}
+
+// CandidateLocations returns a prioritized, limited, number of blobs and their locations that could possibly be reused
+// within the specified (transport scope) (if they still exist, which is not guaranteed).
+//
+// If !canSubstitute, the returned candidates will match the submitted digest exactly; if canSubstitute,
+// data from previous RecordDigestUncompressedPair calls is used to also look up variants of the blob which have the same
+// uncompressed digest.
+func (bdc *boltDBCache) CandidateLocations(transport types.ImageTransport, scope types.BICTransportScope, primaryDigest digest.Digest, canSubstitute bool) []types.BICReplacementCandidate {
+ res := []candidateWithTime{}
+ var uncompressedDigestValue digest.Digest // = ""
+ if err := bdc.view(func(tx *bolt.Tx) error {
+ scopeBucket := tx.Bucket(knownLocationsBucket)
+ if scopeBucket == nil {
+ return nil
+ }
+ scopeBucket = scopeBucket.Bucket([]byte(transport.Name()))
+ if scopeBucket == nil {
+ return nil
+ }
+ scopeBucket = scopeBucket.Bucket([]byte(scope.Opaque))
+ if scopeBucket == nil {
+ return nil
+ }
+
+ res = bdc.appendReplacementCandidates(res, scopeBucket, primaryDigest)
+ if canSubstitute {
+ if uncompressedDigestValue = bdc.uncompressedDigest(tx, primaryDigest); uncompressedDigestValue != "" {
+ b := tx.Bucket(digestByUncompressedBucket)
+ if b != nil {
+ b = b.Bucket([]byte(uncompressedDigestValue.String()))
+ if b != nil {
+ if err := b.ForEach(func(k, _ []byte) error {
+ d, err := digest.Parse(string(k))
+ if err != nil {
+ return err
+ }
+ if d != primaryDigest && d != uncompressedDigestValue {
+ res = bdc.appendReplacementCandidates(res, scopeBucket, d)
+ }
+ return nil
+ }); err != nil {
+ return err
+ }
+ }
+ }
+ if uncompressedDigestValue != primaryDigest {
+ res = bdc.appendReplacementCandidates(res, scopeBucket, uncompressedDigestValue)
+ }
+ }
+ }
+ return nil
+ }); err != nil { // Including os.IsNotExist(err)
+ return []types.BICReplacementCandidate{} // FIXME? Log err (but throttle the log volume on repeated accesses)?
+ }
+
+ return destructivelyPrioritizeReplacementCandidates(res, primaryDigest, uncompressedDigestValue)
+}
diff --git a/vendor/github.com/containers/image/pkg/blobinfocache/default.go b/vendor/github.com/containers/image/pkg/blobinfocache/default.go
new file mode 100644
index 000000000..6da9f2805
--- /dev/null
+++ b/vendor/github.com/containers/image/pkg/blobinfocache/default.go
@@ -0,0 +1,63 @@
+package blobinfocache
+
+import (
+ "fmt"
+ "os"
+ "path/filepath"
+
+ "github.com/containers/image/types"
+ "github.com/sirupsen/logrus"
+)
+
+const (
+ // blobInfoCacheFilename is the file name used for blob info caches.
+ // If the format changes in an incompatible way, increase the version number.
+ blobInfoCacheFilename = "blob-info-cache-v1.boltdb"
+ // systemBlobInfoCacheDir is the directory containing the blob info cache (in blobInfoCacheFilename) for root-running processes.
+ systemBlobInfoCacheDir = "/var/lib/containers/cache"
+)
+
+// blobInfoCacheDir returns a path to a blob info cache appropriate for sys and euid.
+// euid is used so that (sudo …) does not write root-owned files into the unprivileged users’ home directory.
+func blobInfoCacheDir(sys *types.SystemContext, euid int) (string, error) {
+ if sys != nil && sys.BlobInfoCacheDir != "" {
+ return sys.BlobInfoCacheDir, nil
+ }
+
+ // FIXME? On Windows, os.Geteuid() returns -1. What should we do? Right now we treat it as unprivileged
+ // and fail (fall back to memory-only) if neither HOME nor XDG_DATA_HOME is set, which is, at least, safe.
+ if euid == 0 {
+ if sys != nil && sys.RootForImplicitAbsolutePaths != "" {
+ return filepath.Join(sys.RootForImplicitAbsolutePaths, systemBlobInfoCacheDir), nil
+ }
+ return systemBlobInfoCacheDir, nil
+ }
+
+ // This is intended to mirror the GraphRoot determination in github.com/containers/libpod/pkg/util.GetRootlessStorageOpts.
+ dataDir := os.Getenv("XDG_DATA_HOME")
+ if dataDir == "" {
+ home := os.Getenv("HOME")
+ if home == "" {
+ return "", fmt.Errorf("neither XDG_DATA_HOME nor HOME was set non-empty")
+ }
+ dataDir = filepath.Join(home, ".local", "share")
+ }
+ return filepath.Join(dataDir, "containers", "cache"), nil
+}
+
+// DefaultCache returns the default BlobInfoCache implementation appropriate for sys.
+func DefaultCache(sys *types.SystemContext) types.BlobInfoCache {
+ dir, err := blobInfoCacheDir(sys, os.Geteuid())
+ if err != nil {
+ logrus.Debugf("Error determining a location for %s, using a memory-only cache", blobInfoCacheFilename)
+ return NewMemoryCache()
+ }
+ path := filepath.Join(dir, blobInfoCacheFilename)
+ if err := os.MkdirAll(dir, 0700); err != nil {
+ logrus.Debugf("Error creating parent directories for %s, using a memory-only cache: %v", path, err)
+ return NewMemoryCache()
+ }
+
+ logrus.Debugf("Using blob info cache at %s", path)
+ return NewBoltDBCache(path)
+}
diff --git a/vendor/github.com/containers/image/pkg/blobinfocache/memory.go b/vendor/github.com/containers/image/pkg/blobinfocache/memory.go
new file mode 100644
index 000000000..1ce7dee13
--- /dev/null
+++ b/vendor/github.com/containers/image/pkg/blobinfocache/memory.go
@@ -0,0 +1,123 @@
+package blobinfocache
+
+import (
+ "time"
+
+ "github.com/containers/image/types"
+ "github.com/opencontainers/go-digest"
+ "github.com/sirupsen/logrus"
+)
+
+// locationKey only exists to make lookup in knownLocations easier.
+type locationKey struct {
+ transport string
+ scope types.BICTransportScope
+ blobDigest digest.Digest
+}
+
+// memoryCache implements an in-memory-only BlobInfoCache
+type memoryCache struct {
+ uncompressedDigests map[digest.Digest]digest.Digest
+ digestsByUncompressed map[digest.Digest]map[digest.Digest]struct{} // stores a set of digests for each uncompressed digest
+ knownLocations map[locationKey]map[types.BICLocationReference]time.Time // stores last known existence time for each location reference
+}
+
+// NewMemoryCache returns a BlobInfoCache implementation which is in-memory only.
+// This is primarily intended for tests, but also used as a fallback if DefaultCache
+// can’t determine, or set up, the location for a persistent cache.
+// Manual users of types.{ImageSource,ImageDestination} might also use this instead of a persistent cache.
+func NewMemoryCache() types.BlobInfoCache {
+ return &memoryCache{
+ uncompressedDigests: map[digest.Digest]digest.Digest{},
+ digestsByUncompressed: map[digest.Digest]map[digest.Digest]struct{}{},
+ knownLocations: map[locationKey]map[types.BICLocationReference]time.Time{},
+ }
+}
+
+// UncompressedDigest returns an uncompressed digest corresponding to anyDigest.
+// May return anyDigest if it is known to be uncompressed.
+// Returns "" if nothing is known about the digest (it may be compressed or uncompressed).
+func (mem *memoryCache) UncompressedDigest(anyDigest digest.Digest) digest.Digest {
+ if d, ok := mem.uncompressedDigests[anyDigest]; ok {
+ return d
+ }
+ // Presence in digestsByUncompressed implies that anyDigest must already refer to an uncompressed digest.
+ // This way we don't have to waste storage space with trivial (uncompressed, uncompressed) mappings
+ // when we already record a (compressed, uncompressed) pair.
+ if m, ok := mem.digestsByUncompressed[anyDigest]; ok && len(m) > 0 {
+ return anyDigest
+ }
+ return ""
+}
+
+// RecordDigestUncompressedPair records that the uncompressed version of anyDigest is uncompressed.
+// It’s allowed for anyDigest == uncompressed.
+// WARNING: Only call this for LOCALLY VERIFIED data; don’t record a digest pair just because some remote author claims so (e.g.
+// because a manifest/config pair exists); otherwise the cache could be poisoned and allow substituting unexpected blobs.
+// (Eventually, the DiffIDs in image config could detect the substitution, but that may be too late, and not all image formats contain that data.)
+func (mem *memoryCache) RecordDigestUncompressedPair(anyDigest digest.Digest, uncompressed digest.Digest) {
+ if previous, ok := mem.uncompressedDigests[anyDigest]; ok && previous != uncompressed {
+ logrus.Warnf("Uncompressed digest for blob %s previously recorded as %s, now %s", anyDigest, previous, uncompressed)
+ }
+ mem.uncompressedDigests[anyDigest] = uncompressed
+
+ anyDigestSet, ok := mem.digestsByUncompressed[uncompressed]
+ if !ok {
+ anyDigestSet = map[digest.Digest]struct{}{}
+ mem.digestsByUncompressed[uncompressed] = anyDigestSet
+ }
+ anyDigestSet[anyDigest] = struct{}{} // Possibly writing the same struct{}{} presence marker again.
+}
+
+// RecordKnownLocation records that a blob with the specified digest exists within the specified (transport, scope) scope,
+// and can be reused given the opaque location data.
+func (mem *memoryCache) RecordKnownLocation(transport types.ImageTransport, scope types.BICTransportScope, blobDigest digest.Digest, location types.BICLocationReference) {
+ key := locationKey{transport: transport.Name(), scope: scope, blobDigest: blobDigest}
+ locationScope, ok := mem.knownLocations[key]
+ if !ok {
+ locationScope = map[types.BICLocationReference]time.Time{}
+ mem.knownLocations[key] = locationScope
+ }
+ locationScope[location] = time.Now() // Possibly overwriting an older entry.
+}
+
+// appendReplacementCandidates creates candidateWithTime values for (transport, scope, digest), and returns the result of appending them to candidates.
+func (mem *memoryCache) appendReplacementCandidates(candidates []candidateWithTime, transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest) []candidateWithTime {
+ locations := mem.knownLocations[locationKey{transport: transport.Name(), scope: scope, blobDigest: digest}] // nil if not present
+ for l, t := range locations {
+ candidates = append(candidates, candidateWithTime{
+ candidate: types.BICReplacementCandidate{
+ Digest: digest,
+ Location: l,
+ },
+ lastSeen: t,
+ })
+ }
+ return candidates
+}
+
+// CandidateLocations returns a prioritized, limited, number of blobs and their locations that could possibly be reused
+// within the specified (transport scope) (if they still exist, which is not guaranteed).
+//
+// If !canSubstitute, the returned candidates will match the submitted digest exactly; if canSubstitute,
+// data from previous RecordDigestUncompressedPair calls is used to also look up variants of the blob which have the same
+// uncompressed digest.
+func (mem *memoryCache) CandidateLocations(transport types.ImageTransport, scope types.BICTransportScope, primaryDigest digest.Digest, canSubstitute bool) []types.BICReplacementCandidate {
+ res := []candidateWithTime{}
+ res = mem.appendReplacementCandidates(res, transport, scope, primaryDigest)
+ var uncompressedDigest digest.Digest // = ""
+ if canSubstitute {
+ if uncompressedDigest = mem.UncompressedDigest(primaryDigest); uncompressedDigest != "" {
+ otherDigests := mem.digestsByUncompressed[uncompressedDigest] // nil if not present in the map
+ for d := range otherDigests {
+ if d != primaryDigest && d != uncompressedDigest {
+ res = mem.appendReplacementCandidates(res, transport, scope, d)
+ }
+ }
+ if uncompressedDigest != primaryDigest {
+ res = mem.appendReplacementCandidates(res, transport, scope, uncompressedDigest)
+ }
+ }
+ }
+ return destructivelyPrioritizeReplacementCandidates(res, primaryDigest, uncompressedDigest)
+}
diff --git a/vendor/github.com/containers/image/pkg/blobinfocache/none.go b/vendor/github.com/containers/image/pkg/blobinfocache/none.go
new file mode 100644
index 000000000..5658d89ff
--- /dev/null
+++ b/vendor/github.com/containers/image/pkg/blobinfocache/none.go
@@ -0,0 +1,47 @@
+package blobinfocache
+
+import (
+ "github.com/containers/image/types"
+ "github.com/opencontainers/go-digest"
+)
+
+// noCache implements a dummy BlobInfoCache which records no data.
+type noCache struct {
+}
+
+// NoCache implements BlobInfoCache by not recording any data.
+//
+// This exists primarily for implementations of configGetter for Manifest.Inspect,
+// because configs only have one representation.
+// Any use of BlobInfoCache with blobs should usually use at least a short-lived cache.
+var NoCache types.BlobInfoCache = noCache{}
+
+// UncompressedDigest returns an uncompressed digest corresponding to anyDigest.
+// May return anyDigest if it is known to be uncompressed.
+// Returns "" if nothing is known about the digest (it may be compressed or uncompressed).
+func (noCache) UncompressedDigest(anyDigest digest.Digest) digest.Digest {
+ return ""
+}
+
+// RecordDigestUncompressedPair records that the uncompressed version of anyDigest is uncompressed.
+// It’s allowed for anyDigest == uncompressed.
+// WARNING: Only call this for LOCALLY VERIFIED data; don’t record a digest pair just because some remote author claims so (e.g.
+// because a manifest/config pair exists); otherwise the cache could be poisoned and allow substituting unexpected blobs.
+// (Eventually, the DiffIDs in image config could detect the substitution, but that may be too late, and not all image formats contain that data.)
+func (noCache) RecordDigestUncompressedPair(anyDigest digest.Digest, uncompressed digest.Digest) {
+}
+
+// RecordKnownLocation records that a blob with the specified digest exists within the specified (transport, scope) scope,
+// and can be reused given the opaque location data.
+func (noCache) RecordKnownLocation(transport types.ImageTransport, scope types.BICTransportScope, blobDigest digest.Digest, location types.BICLocationReference) {
+}
+
+// CandidateLocations returns a prioritized, limited, number of blobs and their locations that could possibly be reused
+// within the specified (transport scope) (if they still exist, which is not guaranteed).
+//
+// If !canSubstitute, the returned candidates will match the submitted digest exactly; if canSubstitute,
+// data from previous RecordDigestUncompressedPair calls is used to also look up variants of the blob which have the same
+// uncompressed digest.
+func (noCache) CandidateLocations(transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest, canSubstitute bool) []types.BICReplacementCandidate {
+ return nil
+}
diff --git a/vendor/github.com/containers/image/pkg/blobinfocache/prioritize.go b/vendor/github.com/containers/image/pkg/blobinfocache/prioritize.go
new file mode 100644
index 000000000..02709aa1c
--- /dev/null
+++ b/vendor/github.com/containers/image/pkg/blobinfocache/prioritize.go
@@ -0,0 +1,108 @@
+package blobinfocache
+
+import (
+ "sort"
+ "time"
+
+ "github.com/containers/image/types"
+ "github.com/opencontainers/go-digest"
+)
+
+// replacementAttempts is the number of blob replacement candidates returned by destructivelyPrioritizeReplacementCandidates,
+// and therefore ultimately by types.BlobInfoCache.CandidateLocations.
+// This is a heuristic/guess, and could well use a different value.
+const replacementAttempts = 5
+
+// candidateWithTime is the input to types.BICReplacementCandidate prioritization.
+type candidateWithTime struct {
+ candidate types.BICReplacementCandidate // The replacement candidate
+ lastSeen time.Time // Time the candidate was last known to exist (either read or written)
+}
+
+// candidateSortState is a local state implementing sort.Interface on candidates to prioritize,
+// along with the specially-treated digest values for the implementation of sort.Interface.Less
+type candidateSortState struct {
+ cs []candidateWithTime // The entries to sort
+ primaryDigest digest.Digest // The digest the user actually asked for
+ uncompressedDigest digest.Digest // The uncompressed digest corresponding to primaryDigest. May be "", or even equal to primaryDigest
+}
+
+func (css *candidateSortState) Len() int {
+ return len(css.cs)
+}
+
+func (css *candidateSortState) Less(i, j int) bool {
+ xi := css.cs[i]
+ xj := css.cs[j]
+
+ // primaryDigest entries come first, more recent first.
+ // uncompressedDigest entries, if uncompressedDigest is set and != primaryDigest, come last, more recent entry first.
+ // Other digest values are primarily sorted by time (more recent first), secondarily by digest (to provide a deterministic order)
+
+ // First, deal with the primaryDigest/uncompressedDigest cases:
+ if xi.candidate.Digest != xj.candidate.Digest {
+ // - The two digests are different, and one (or both) of the digests is primaryDigest or uncompressedDigest: time does not matter
+ if xi.candidate.Digest == css.primaryDigest {
+ return true
+ }
+ if xj.candidate.Digest == css.primaryDigest {
+ return false
+ }
+ if css.uncompressedDigest != "" {
+ if xi.candidate.Digest == css.uncompressedDigest {
+ return false
+ }
+ if xj.candidate.Digest == css.uncompressedDigest {
+ return true
+ }
+ }
+ } else { // xi.candidate.Digest == xj.candidate.Digest
+ // The two digests are the same, and are either primaryDigest or uncompressedDigest: order by time
+ if xi.candidate.Digest == css.primaryDigest || (css.uncompressedDigest != "" && xi.candidate.Digest == css.uncompressedDigest) {
+ return xi.lastSeen.After(xj.lastSeen)
+ }
+ }
+
+ // Neither of the digests are primaryDigest/uncompressedDigest:
+ if !xi.lastSeen.Equal(xj.lastSeen) { // Order primarily by time
+ return xi.lastSeen.After(xj.lastSeen)
+ }
+ // Fall back to digest, if timestamps end up _exactly_ the same (how?!)
+ return xi.candidate.Digest < xj.candidate.Digest
+}
+
+func (css *candidateSortState) Swap(i, j int) {
+ css.cs[i], css.cs[j] = css.cs[j], css.cs[i]
+}
+
+// destructivelyPrioritizeReplacementCandidatesWithMax is destructivelyPrioritizeReplacementCandidates with a parameter for the
+// number of entries to limit, only to make testing simpler.
+func destructivelyPrioritizeReplacementCandidatesWithMax(cs []candidateWithTime, primaryDigest, uncompressedDigest digest.Digest, maxCandidates int) []types.BICReplacementCandidate {
+ // We don't need to use sort.Stable() because nanosecond timestamps are (presumably?) unique, so no two elements should
+ // compare equal.
+ sort.Sort(&candidateSortState{
+ cs: cs,
+ primaryDigest: primaryDigest,
+ uncompressedDigest: uncompressedDigest,
+ })
+
+ resLength := len(cs)
+ if resLength > maxCandidates {
+ resLength = maxCandidates
+ }
+ res := make([]types.BICReplacementCandidate, resLength)
+ for i := range res {
+ res[i] = cs[i].candidate
+ }
+ return res
+}
+
+// destructivelyPrioritizeReplacementCandidates consumes AND DESTROYS an array of possible replacement candidates with their last known existence times,
+// the primary digest the user actually asked for, and the corresponding uncompressed digest (if known, possibly equal to the primary digest),
+// and returns an appropriately prioritized and/or trimmed result suitable for a return value from types.BlobInfoCache.CandidateLocations.
+//
+// WARNING: The array of candidates is destructively modified. (The implementation of this function could of course
+// make a copy, but all CandidateLocations implementations build the slice of candidates only for the single purpose of calling this function anyway.)
+func destructivelyPrioritizeReplacementCandidates(cs []candidateWithTime, primaryDigest, uncompressedDigest digest.Digest) []types.BICReplacementCandidate {
+ return destructivelyPrioritizeReplacementCandidatesWithMax(cs, primaryDigest, uncompressedDigest, replacementAttempts)
+}
diff --git a/vendor/github.com/containers/image/storage/storage_image.go b/vendor/github.com/containers/image/storage/storage_image.go
index d1b010a76..bd6813119 100644
--- a/vendor/github.com/containers/image/storage/storage_image.go
+++ b/vendor/github.com/containers/image/storage/storage_image.go
@@ -16,6 +16,7 @@ import (
"github.com/containers/image/image"
"github.com/containers/image/internal/tmpdir"
"github.com/containers/image/manifest"
+ "github.com/containers/image/pkg/blobinfocache"
"github.com/containers/image/types"
"github.com/containers/storage"
"github.com/containers/storage/pkg/archive"
@@ -99,8 +100,10 @@ func (s storageImageSource) Close() error {
return nil
}
-// GetBlob reads the data blob or filesystem layer which matches the digest and size, if given.
-func (s *storageImageSource) GetBlob(ctx context.Context, info types.BlobInfo) (rc io.ReadCloser, n int64, err error) {
+// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown).
+// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
+// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location.
+func (s *storageImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (rc io.ReadCloser, n int64, err error) {
if info.Digest == image.GzippedEmptyLayerDigest {
return ioutil.NopCloser(bytes.NewReader(image.GzippedEmptyLayer)), int64(len(image.GzippedEmptyLayer)), nil
}
@@ -317,9 +320,17 @@ func (s *storageImageDestination) computeNextBlobCacheFile() string {
return filepath.Join(s.directory, fmt.Sprintf("%d", atomic.AddInt32(&s.nextTempFileID, 1)))
}
-// PutBlob stores a layer or data blob in our temporary directory, checking that any information
-// in the blobinfo matches the incoming data.
-func (s *storageImageDestination) PutBlob(ctx context.Context, stream io.Reader, blobinfo types.BlobInfo, isConfig bool) (types.BlobInfo, error) {
+// PutBlob writes contents of stream and returns data representing the result.
+// inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it.
+// inputInfo.Size is the expected length of stream, if known.
+// inputInfo.MediaType describes the blob format, if known.
+// May update cache.
+// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
+// to any other readers for download using the supplied digest.
+// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
+func (s *storageImageDestination) PutBlob(ctx context.Context, stream io.Reader, blobinfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) {
+ // Stores a layer or data blob in our temporary directory, checking that any information
+ // in the blobinfo matches the incoming data.
errorBlobInfo := types.BlobInfo{
Digest: "",
Size: -1,
@@ -370,6 +381,8 @@ func (s *storageImageDestination) PutBlob(ctx context.Context, stream io.Reader,
if blobSize < 0 {
blobSize = counter.Count
}
+ // This is safe because we have just computed both values ourselves.
+ cache.RecordDigestUncompressedPair(blobDigest, diffID.Digest())
return types.BlobInfo{
Digest: blobDigest,
Size: blobSize,
@@ -377,59 +390,82 @@ func (s *storageImageDestination) PutBlob(ctx context.Context, stream io.Reader,
}, nil
}
-// HasBlob returns true iff the image destination already contains a blob with the matching digest which can be
-// reapplied using ReapplyBlob.
-//
-// Unlike PutBlob, the digest can not be empty. If HasBlob returns true, the size of the blob must also be returned.
-// If the destination does not contain the blob, or it is unknown, HasBlob ordinarily returns (false, -1, nil);
-// it returns a non-nil error only on an unexpected failure.
-func (s *storageImageDestination) HasBlob(ctx context.Context, blobinfo types.BlobInfo) (bool, int64, error) {
+// TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
+// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
+// info.Digest must not be empty.
+// If canSubstitute, TryReusingBlob can use an equivalent of the desired blob; in that case the returned info may not match the input.
+// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size.
+// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
+// May use and/or update cache.
+func (s *storageImageDestination) TryReusingBlob(ctx context.Context, blobinfo types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) {
if blobinfo.Digest == "" {
- return false, -1, errors.Errorf(`Can not check for a blob with unknown digest`)
+ return false, types.BlobInfo{}, errors.Errorf(`Can not check for a blob with unknown digest`)
}
if err := blobinfo.Digest.Validate(); err != nil {
- return false, -1, errors.Wrapf(err, `Can not check for a blob with invalid digest`)
+ return false, types.BlobInfo{}, errors.Wrapf(err, `Can not check for a blob with invalid digest`)
}
+
// Check if we've already cached it in a file.
if size, ok := s.fileSizes[blobinfo.Digest]; ok {
- return true, size, nil
+ return true, types.BlobInfo{
+ Digest: blobinfo.Digest,
+ Size: size,
+ MediaType: blobinfo.MediaType,
+ }, nil
}
+
// Check if we have a wasn't-compressed layer in storage that's based on that blob.
layers, err := s.imageRef.transport.store.LayersByUncompressedDigest(blobinfo.Digest)
if err != nil && errors.Cause(err) != storage.ErrLayerUnknown {
- return false, -1, errors.Wrapf(err, `Error looking for layers with digest %q`, blobinfo.Digest)
+ return false, types.BlobInfo{}, errors.Wrapf(err, `Error looking for layers with digest %q`, blobinfo.Digest)
}
if len(layers) > 0 {
// Save this for completeness.
s.blobDiffIDs[blobinfo.Digest] = layers[0].UncompressedDigest
- return true, layers[0].UncompressedSize, nil
+ return true, types.BlobInfo{
+ Digest: blobinfo.Digest,
+ Size: layers[0].UncompressedSize,
+ MediaType: blobinfo.MediaType,
+ }, nil
}
+
// Check if we have a was-compressed layer in storage that's based on that blob.
layers, err = s.imageRef.transport.store.LayersByCompressedDigest(blobinfo.Digest)
if err != nil && errors.Cause(err) != storage.ErrLayerUnknown {
- return false, -1, errors.Wrapf(err, `Error looking for compressed layers with digest %q`, blobinfo.Digest)
+ return false, types.BlobInfo{}, errors.Wrapf(err, `Error looking for compressed layers with digest %q`, blobinfo.Digest)
}
if len(layers) > 0 {
// Record the uncompressed value so that we can use it to calculate layer IDs.
s.blobDiffIDs[blobinfo.Digest] = layers[0].UncompressedDigest
- return true, layers[0].CompressedSize, nil
+ return true, types.BlobInfo{
+ Digest: blobinfo.Digest,
+ Size: layers[0].CompressedSize,
+ MediaType: blobinfo.MediaType,
+ }, nil
+ }
+
+ // Does the blob correspond to a known DiffID which we already have available?
+ // Because we must return the size, which is unknown for unavailable compressed blobs, the returned BlobInfo refers to the
+ // uncompressed layer, and that can happen only if canSubstitute.
+ if canSubstitute {
+ if uncompressedDigest := cache.UncompressedDigest(blobinfo.Digest); uncompressedDigest != "" && uncompressedDigest != blobinfo.Digest {
+ layers, err := s.imageRef.transport.store.LayersByUncompressedDigest(uncompressedDigest)
+ if err != nil && errors.Cause(err) != storage.ErrLayerUnknown {
+ return false, types.BlobInfo{}, errors.Wrapf(err, `Error looking for layers with digest %q`, uncompressedDigest)
+ }
+ if len(layers) > 0 {
+ s.blobDiffIDs[uncompressedDigest] = layers[0].UncompressedDigest
+ return true, types.BlobInfo{
+ Digest: uncompressedDigest,
+ Size: layers[0].UncompressedSize,
+ MediaType: blobinfo.MediaType,
+ }, nil
+ }
+ }
}
- // Nope, we don't have it.
- return false, -1, nil
-}
-// ReapplyBlob is now a no-op, assuming HasBlob() says we already have it, since Commit() can just apply the
-// same one when it walks the list in the manifest.
-func (s *storageImageDestination) ReapplyBlob(ctx context.Context, blobinfo types.BlobInfo) (types.BlobInfo, error) {
- present, size, err := s.HasBlob(ctx, blobinfo)
- if !present {
- return types.BlobInfo{}, errors.Errorf("error reapplying blob %+v: blob was not previously applied", blobinfo)
- }
- if err != nil {
- return types.BlobInfo{}, errors.Wrapf(err, "error reapplying blob %+v", blobinfo)
- }
- blobinfo.Size = size
- return blobinfo, nil
+ // Nope, we don't have it.
+ return false, types.BlobInfo{}, nil
}
// computeID computes a recommended image ID based on information we have so far. If
@@ -514,8 +550,12 @@ func (s *storageImageDestination) Commit(ctx context.Context) error {
if !haveDiffID {
// Check if it's elsewhere and the caller just forgot to pass it to us in a PutBlob(),
// or to even check if we had it.
+ // Use blobinfocache.NoCache to avoid a repeated DiffID lookup in the BlobInfoCache; a caller
+ // that relies on using a blob digest that has never been seen by the store had better call
+ // TryReusingBlob; not calling PutBlob already violates the documented API, so there’s only
+ // so far we are going to accommodate that (if we should be doing that at all).
logrus.Debugf("looking for diffID for blob %+v", blob.Digest)
- has, _, err := s.HasBlob(ctx, blob.BlobInfo)
+ has, _, err := s.TryReusingBlob(ctx, blob.BlobInfo, blobinfocache.NoCache, false)
if err != nil {
return errors.Wrapf(err, "error checking for a layer based on blob %q", blob.Digest.String())
}
diff --git a/vendor/github.com/containers/image/tarball/tarball_src.go b/vendor/github.com/containers/image/tarball/tarball_src.go
index 17af60b30..ee963b8d8 100644
--- a/vendor/github.com/containers/image/tarball/tarball_src.go
+++ b/vendor/github.com/containers/image/tarball/tarball_src.go
@@ -207,7 +207,10 @@ func (is *tarballImageSource) Close() error {
return nil
}
-func (is *tarballImageSource) GetBlob(ctx context.Context, blobinfo types.BlobInfo) (io.ReadCloser, int64, error) {
+// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown).
+// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
+// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location.
+func (is *tarballImageSource) GetBlob(ctx context.Context, blobinfo types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) {
// We should only be asked about things in the manifest. Maybe the configuration blob.
if blobinfo.Digest == is.configID {
return ioutil.NopCloser(bytes.NewBuffer(is.config)), is.configSize, nil
diff --git a/vendor/github.com/containers/image/types/types.go b/vendor/github.com/containers/image/types/types.go
index a552e4597..dda332776 100644
--- a/vendor/github.com/containers/image/types/types.go
+++ b/vendor/github.com/containers/image/types/types.go
@@ -100,6 +100,82 @@ type BlobInfo struct {
MediaType string
}
+// BICTransportScope encapsulates transport-dependent representation of a “scope” where blobs are or are not present.
+// BlobInfoCache.RecordKnownLocation / BlobInfoCache.CandidateLocations record data about blobs keyed by (scope, digest).
+// The scope will typically be similar to an ImageReference, or a superset of it within which blobs are reusable.
+//
+// NOTE: The contents of this structure may be recorded in a persistent file, possibly shared across different
+// tools which use different versions of the transport. Allow for reasonable backward/forward compatibility,
+// at least by not failing hard when encountering unknown data.
+type BICTransportScope struct {
+ Opaque string
+}
+
+// BICLocationReference encapsulates transport-dependent representation of a blob location within a BICTransportScope.
+// Each transport can store arbitrary data using BlobInfoCache.RecordKnownLocation, and ImageDestination.TryReusingBlob
+// can look it up using BlobInfoCache.CandidateLocations.
+//
+// NOTE: The contents of this structure may be recorded in a persistent file, possibly shared across different
+// tools which use different versions of the transport. Allow for reasonable backward/forward compatibility,
+// at least by not failing hard when encountering unknown data.
+type BICLocationReference struct {
+ Opaque string
+}
+
+// BICReplacementCandidate is an item returned by BlobInfoCache.CandidateLocations.
+type BICReplacementCandidate struct {
+ Digest digest.Digest
+ Location BICLocationReference
+}
+
+// BlobInfoCache records data useful for reusing blobs, or substituting equivalent ones, to avoid unnecessary blob copies.
+//
+// It records two kinds of data:
+// - Sets of corresponding digest vs. uncompressed digest ("DiffID") pairs:
+// One of the two digests is known to be uncompressed, and a single uncompressed digest may correspond to more than one compressed digest.
+// This allows matching compressed layer blobs to existing local uncompressed layers (to avoid unnecessary download and decompression),
+// or uncompressed layer blobs to existing remote compressed layers (to avoid unnecessary compression and upload).
+//
+// It is allowed to record an (uncompressed digest, the same uncompressed digest) correspondence, to express that the digest is known
+// to be uncompressed (i.e. that a conversion from schema1 does not have to decompress the blob to compute a DiffID value).
+//
+// This mapping is primarily maintained in generic copy.Image code, but transports may want to contribute more data points if they independently
+// compress/decompress blobs for their own purposes.
+//
+// - Known blob locations, managed by individual transports:
+// The transports call RecordKnownLocation when encountering a blob that could possibly be reused (typically in GetBlob/PutBlob/TryReusingBlob),
+// recording transport-specific information that allows the transport to reuse the blob in the future;
+// then, TryReusingBlob implementations can call CandidateLocations to look up previously recorded blob locations that could be reused.
+//
+// Each transport defines its own “scopes” within which blob reuse is possible (e.g. in the docker/distribution case, blobs
+// can be directly reused within a registry, or mounted across registries within a registry server.)
+//
+// None of the methods return an error indication: errors when neither reading from, nor writing to, the cache, should be fatal;
+// users of the cache should just fall back to copying the blobs the usual way.
+type BlobInfoCache interface {
+ // UncompressedDigest returns an uncompressed digest corresponding to anyDigest.
+ // May return anyDigest if it is known to be uncompressed.
+ // Returns "" if nothing is known about the digest (it may be compressed or uncompressed).
+ UncompressedDigest(anyDigest digest.Digest) digest.Digest
+ // RecordDigestUncompressedPair records that the uncompressed version of anyDigest is uncompressed.
+ // It’s allowed for anyDigest == uncompressed.
+ // WARNING: Only call this for LOCALLY VERIFIED data; don’t record a digest pair just because some remote author claims so (e.g.
+ // because a manifest/config pair exists); otherwise the cache could be poisoned and allow substituting unexpected blobs.
+ // (Eventually, the DiffIDs in image config could detect the substitution, but that may be too late, and not all image formats contain that data.)
+ RecordDigestUncompressedPair(anyDigest digest.Digest, uncompressed digest.Digest)
+
+ // RecordKnownLocation records that a blob with the specified digest exists within the specified (transport, scope) scope,
+ // and can be reused given the opaque location data.
+ RecordKnownLocation(transport ImageTransport, scope BICTransportScope, digest digest.Digest, location BICLocationReference)
+ // CandidateLocations returns a prioritized, limited, number of blobs and their locations that could possibly be reused
+ // within the specified (transport, scope) (if they still exist, which is not guaranteed).
+ //
+ // If !canSubstitute, the returned candidates will match the submitted digest exactly; if canSubstitute,
+ // data from previous RecordDigestUncompressedPair calls is used to also look up variants of the blob which have the same
+ // uncompressed digest.
+ CandidateLocations(transport ImageTransport, scope BICTransportScope, digest digest.Digest, canSubstitute bool) []BICReplacementCandidate
+}
+
// ImageSource is a service, possibly remote (= slow), to download components of a single image or a named image set (manifest list).
// This is primarily useful for copying images around; for examining their properties, Image (below)
// is usually more useful.
@@ -120,7 +196,8 @@ type ImageSource interface {
GetManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, string, error)
// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown).
// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
- GetBlob(context.Context, BlobInfo) (io.ReadCloser, int64, error)
+ // May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location.
+ GetBlob(context.Context, BlobInfo, BlobInfoCache) (io.ReadCloser, int64, error)
// GetSignatures returns the image's signatures. It may use a remote (= slow) service.
// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve signatures for
// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list
@@ -148,8 +225,7 @@ const (
// ImageDestination is a service, possibly remote (= slow), to store components of a single image.
//
// There is a specific required order for some of the calls:
-// PutBlob on the various blobs, if any, MUST be called before PutManifest (manifest references blobs, which may be created or compressed only at push time)
-// ReapplyBlob, if used, MUST only be called if HasBlob returned true for the same blob digest
+// TryReusingBlob/PutBlob on the various blobs, if any, MUST be called before PutManifest (manifest references blobs, which may be created or compressed only at push time)
// PutSignatures, if called, MUST be called after PutManifest (signatures reference manifest contents)
// Finally, Commit MUST be called if the caller wants the image, as formed by the components saved above, to persist.
//
@@ -183,17 +259,19 @@ type ImageDestination interface {
// inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it.
// inputInfo.Size is the expected length of stream, if known.
// inputInfo.MediaType describes the blob format, if known.
+ // May update cache.
// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
// to any other readers for download using the supplied digest.
// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
- PutBlob(ctx context.Context, stream io.Reader, inputInfo BlobInfo, isConfig bool) (BlobInfo, error)
- // HasBlob returns true iff the image destination already contains a blob with the matching digest which can be reapplied using ReapplyBlob.
- // Unlike PutBlob, the digest can not be empty. If HasBlob returns true, the size of the blob must also be returned.
- // If the destination does not contain the blob, or it is unknown, HasBlob ordinarily returns (false, -1, nil);
- // it returns a non-nil error only on an unexpected failure.
- HasBlob(ctx context.Context, info BlobInfo) (bool, int64, error)
- // ReapplyBlob informs the image destination that a blob for which HasBlob previously returned true would have been passed to PutBlob if it had returned false. Like HasBlob and unlike PutBlob, the digest can not be empty. If the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree.
- ReapplyBlob(ctx context.Context, info BlobInfo) (BlobInfo, error)
+ PutBlob(ctx context.Context, stream io.Reader, inputInfo BlobInfo, cache BlobInfoCache, isConfig bool) (BlobInfo, error)
+ // TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
+ // (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
+ // info.Digest must not be empty.
+ // If canSubstitute, TryReusingBlob can use an equivalent of the desired blob; in that case the returned info may not match the input.
+ // If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size.
+ // If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
+ // May use and/or update cache.
+ TryReusingBlob(ctx context.Context, info BlobInfo, cache BlobInfoCache, canSubstitute bool) (bool, BlobInfo, error)
// PutManifest writes manifest to the destination.
// FIXME? This should also receive a MIME type if known, to differentiate between schema versions.
// If the destination is in principle available, refuses this manifest type (e.g. it does not recognize the schema),
@@ -375,6 +453,8 @@ type SystemContext struct {
ArchitectureChoice string
// If not "", overrides the use of platform.GOOS when choosing an image or verifying OS match.
OSChoice string
+ // If not "", overrides the system's default directory containing a blob info cache.
+ BlobInfoCacheDir string
// Additional tags when creating or copying a docker-archive.
DockerArchiveAdditionalTags []reference.NamedTagged
diff --git a/vendor/github.com/containers/image/vendor.conf b/vendor/github.com/containers/image/vendor.conf
index de6dcbecf..88537981a 100644
--- a/vendor/github.com/containers/image/vendor.conf
+++ b/vendor/github.com/containers/image/vendor.conf
@@ -43,3 +43,4 @@ github.com/syndtr/gocapability master
github.com/Microsoft/go-winio ab35fc04b6365e8fcb18e6e9e41ea4a02b10b175
github.com/Microsoft/hcsshim eca7177590cdcbd25bbc5df27e3b693a54b53a6a
github.com/ulikunitz/xz v0.5.4
+github.com/boltdb/bolt master