Diffstat (limited to 'vendor/github.com')
-rw-r--r--  vendor/github.com/containers/image/v5/copy/copy.go                              |  11
-rw-r--r--  vendor/github.com/containers/image/v5/docker/docker_client.go                   |   6
-rw-r--r--  vendor/github.com/containers/image/v5/docker/docker_image.go                    |   4
-rw-r--r--  vendor/github.com/containers/image/v5/docker/docker_image_dest.go               |  53
-rw-r--r--  vendor/github.com/containers/image/v5/docker/docker_image_src.go                |  39
-rw-r--r--  vendor/github.com/containers/image/v5/internal/pkg/platform/platform_matcher.go |  91
-rw-r--r--  vendor/github.com/containers/image/v5/internal/uploadreader/upload_reader.go    |  61
-rw-r--r--  vendor/github.com/containers/image/v5/manifest/common.go                        | 118
-rw-r--r--  vendor/github.com/containers/image/v5/manifest/docker_schema2.go                |  97
-rw-r--r--  vendor/github.com/containers/image/v5/manifest/docker_schema2_list.go           |   2
-rw-r--r--  vendor/github.com/containers/image/v5/manifest/list.go                          |  24
-rw-r--r--  vendor/github.com/containers/image/v5/manifest/manifest.go                      |  10
-rw-r--r--  vendor/github.com/containers/image/v5/manifest/oci.go                           | 106
-rw-r--r--  vendor/github.com/containers/image/v5/manifest/oci_index.go                     |   5
-rw-r--r--  vendor/github.com/containers/image/v5/version/version.go                        |   2
-rw-r--r--  vendor/github.com/vbauerster/mpb/v5/bar.go                                      |  12
-rw-r--r--  vendor/github.com/vbauerster/mpb/v5/bar_filler.go                               |   4
-rw-r--r--  vendor/github.com/vbauerster/mpb/v5/go.mod                                      |   4
-rw-r--r--  vendor/github.com/vbauerster/mpb/v5/go.sum                                      |   8
-rw-r--r--  vendor/github.com/vbauerster/mpb/v5/internal/percentage.go                      |   3
20 files changed, 351 insertions, 309 deletions
diff --git a/vendor/github.com/containers/image/v5/copy/copy.go b/vendor/github.com/containers/image/v5/copy/copy.go
index e8610254c..9fc0e5123 100644
--- a/vendor/github.com/containers/image/v5/copy/copy.go
+++ b/vendor/github.com/containers/image/v5/copy/copy.go
@@ -798,7 +798,6 @@ func (ic *imageCopier) copyLayers(ctx context.Context) error {
// copyGroup is used to determine if all layers are copied
copyGroup := sync.WaitGroup{}
- copyGroup.Add(numLayers)
// copySemaphore is used to limit the number of parallel downloads to
// avoid malicious images causing troubles and to be nice to servers.
@@ -850,18 +849,22 @@ func (ic *imageCopier) copyLayers(ctx context.Context) error {
if err := func() error { // A scope for defer
progressPool, progressCleanup := ic.c.newProgressPool(ctx)
- defer progressCleanup()
+ defer func() {
+ // Wait for all layers to be copied. progressCleanup() must not be called while any of the copyLayerHelpers interact with the progressPool.
+ copyGroup.Wait()
+ progressCleanup()
+ }()
for i, srcLayer := range srcInfos {
err = copySemaphore.Acquire(ctx, 1)
if err != nil {
return errors.Wrapf(err, "Can't acquire semaphore")
}
+ copyGroup.Add(1)
go copyLayerHelper(i, srcLayer, encLayerBitmap[i], progressPool)
}
- // Wait for all layers to be copied
- copyGroup.Wait()
+ // A call to copyGroup.Wait() is done at this point by the defer above.
return nil
}(); err != nil {
return err
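The copy.go hunks above fix an ordering problem: copyGroup.Add is now called once per goroutine that is actually started, and copyGroup.Wait moves into the defer so progressCleanup can never run while a copyLayerHelper is still using the progress pool, even when the loop exits early (for example when semaphore acquisition fails). A minimal sketch of that pattern, with hypothetical names (startWorker, cleanup) rather than the real copy code:

```go
package main

import (
	"fmt"
	"sync"
)

// startAll illustrates the ordering established above: the pool cleanup runs only
// after every goroutine that was actually started has finished, even when the
// loop returns early. startWorker and cleanup are hypothetical stand-ins, not
// part of containers/image.
func startAll(jobs []int, startWorker func(int), cleanup func()) error {
	var wg sync.WaitGroup
	defer func() {
		wg.Wait() // wait for in-flight workers before tearing anything down
		cleanup()
	}()
	for _, j := range jobs {
		if j < 0 {
			return fmt.Errorf("refusing job %d", j) // early return: the defer still waits first
		}
		wg.Add(1) // Add(1) per started goroutine, not Add(len(jobs)) up front
		j := j
		go func() {
			defer wg.Done()
			startWorker(j)
		}()
	}
	return nil
}

func main() {
	err := startAll([]int{1, 2, 3},
		func(j int) { fmt.Println("copied", j) },
		func() { fmt.Println("pool torn down") })
	fmt.Println("err:", err)
}
```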
diff --git a/vendor/github.com/containers/image/v5/docker/docker_client.go b/vendor/github.com/containers/image/v5/docker/docker_client.go
index c5c49b90b..9461bc91a 100644
--- a/vendor/github.com/containers/image/v5/docker/docker_client.go
+++ b/vendor/github.com/containers/image/v5/docker/docker_client.go
@@ -613,6 +613,9 @@ func (c *dockerClient) getBearerTokenOAuth2(ctx context.Context, challenge chall
params.Add("client_id", "containers/image")
authReq.Body = ioutil.NopCloser(bytes.NewBufferString(params.Encode()))
+ if c.sys != nil && c.sys.DockerRegistryUserAgent != "" {
+ authReq.Header.Add("User-Agent", c.sys.DockerRegistryUserAgent)
+ }
authReq.Header.Add("Content-Type", "application/x-www-form-urlencoded")
logrus.Debugf("%s %s", authReq.Method, authReq.URL.String())
res, err := c.client.Do(authReq)
@@ -665,6 +668,9 @@ func (c *dockerClient) getBearerToken(ctx context.Context, challenge challenge,
if c.auth.Username != "" && c.auth.Password != "" {
authReq.SetBasicAuth(c.auth.Username, c.auth.Password)
}
+ if c.sys != nil && c.sys.DockerRegistryUserAgent != "" {
+ authReq.Header.Add("User-Agent", c.sys.DockerRegistryUserAgent)
+ }
logrus.Debugf("%s %s", authReq.Method, authReq.URL.String())
res, err := c.client.Do(authReq)
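Both docker_client.go hunks apply the same guard before the token request is sent: only add a User-Agent header when a SystemContext exists and DockerRegistryUserAgent is set, so Bearer-token traffic is attributed the same way as regular registry requests. A small sketch of that guard, with a trimmed-down stand-in for types.SystemContext:

```go
package main

import "net/http"

// systemContext is a minimal stand-in for types.SystemContext; only the field
// used by the guard above is included.
type systemContext struct {
	DockerRegistryUserAgent string
}

// addUserAgent mirrors the added lines: the header is set only when a context is
// present and a user agent was configured.
func addUserAgent(req *http.Request, sys *systemContext) {
	if sys != nil && sys.DockerRegistryUserAgent != "" {
		req.Header.Add("User-Agent", sys.DockerRegistryUserAgent)
	}
}

func main() {
	req, _ := http.NewRequest(http.MethodGet, "https://auth.example/token", nil)
	addUserAgent(req, &systemContext{DockerRegistryUserAgent: "skopeo/1.0"})
	_ = req
}
```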
diff --git a/vendor/github.com/containers/image/v5/docker/docker_image.go b/vendor/github.com/containers/image/v5/docker/docker_image.go
index 483581dbc..479effa59 100644
--- a/vendor/github.com/containers/image/v5/docker/docker_image.go
+++ b/vendor/github.com/containers/image/v5/docker/docker_image.go
@@ -37,7 +37,7 @@ func newImage(ctx context.Context, sys *types.SystemContext, ref dockerReference
// SourceRefFullName returns a fully expanded name for the repository this image is in.
func (i *Image) SourceRefFullName() string {
- return i.src.ref.ref.Name()
+ return i.src.logicalRef.ref.Name()
}
// GetRepositoryTags list all tags available in the repository. The tag
@@ -45,7 +45,7 @@ func (i *Image) SourceRefFullName() string {
// backward-compatible shim method which calls the module-level
// GetRepositoryTags)
func (i *Image) GetRepositoryTags(ctx context.Context) ([]string, error) {
- return GetRepositoryTags(ctx, i.src.c.sys, i.src.ref)
+ return GetRepositoryTags(ctx, i.src.c.sys, i.src.logicalRef)
}
// GetRepositoryTags list all tags available in the repository. The tag
diff --git a/vendor/github.com/containers/image/v5/docker/docker_image_dest.go b/vendor/github.com/containers/image/v5/docker/docker_image_dest.go
index ab74e1607..979100ee3 100644
--- a/vendor/github.com/containers/image/v5/docker/docker_image_dest.go
+++ b/vendor/github.com/containers/image/v5/docker/docker_image_dest.go
@@ -16,6 +16,7 @@ import (
"github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/internal/iolimits"
+ "github.com/containers/image/v5/internal/uploadreader"
"github.com/containers/image/v5/manifest"
"github.com/containers/image/v5/pkg/blobinfocache/none"
"github.com/containers/image/v5/types"
@@ -162,20 +163,31 @@ func (d *dockerImageDestination) PutBlob(ctx context.Context, stream io.Reader,
digester := digest.Canonical.Digester()
sizeCounter := &sizeCounter{}
- tee := io.TeeReader(stream, io.MultiWriter(digester.Hash(), sizeCounter))
- res, err = d.c.makeRequestToResolvedURL(ctx, "PATCH", uploadLocation.String(), map[string][]string{"Content-Type": {"application/octet-stream"}}, tee, inputInfo.Size, v2Auth, nil)
+ uploadLocation, err = func() (*url.URL, error) { // A scope for defer
+ uploadReader := uploadreader.NewUploadReader(io.TeeReader(stream, io.MultiWriter(digester.Hash(), sizeCounter)))
+ // This error text should never be user-visible; we terminate only after makeRequestToResolvedURL
+ // returns, so there isn’t a way for the error text to be provided to any of our callers.
+ defer uploadReader.Terminate(errors.New("Reading data from an already terminated upload"))
+ res, err = d.c.makeRequestToResolvedURL(ctx, "PATCH", uploadLocation.String(), map[string][]string{"Content-Type": {"application/octet-stream"}}, uploadReader, inputInfo.Size, v2Auth, nil)
+ if err != nil {
+ logrus.Debugf("Error uploading layer chunked %v", err)
+ return nil, err
+ }
+ defer res.Body.Close()
+ if !successStatus(res.StatusCode) {
+ return nil, errors.Wrapf(client.HandleErrorResponse(res), "Error uploading layer chunked")
+ }
+ uploadLocation, err := res.Location()
+ if err != nil {
+ return nil, errors.Wrap(err, "Error determining upload URL")
+ }
+ return uploadLocation, nil
+ }()
if err != nil {
- logrus.Debugf("Error uploading layer chunked, response %#v", res)
return types.BlobInfo{}, err
}
- defer res.Body.Close()
computedDigest := digester.Digest()
- uploadLocation, err = res.Location()
- if err != nil {
- return types.BlobInfo{}, errors.Wrap(err, "Error determining upload URL")
- }
-
// FIXME: DELETE uploadLocation on failure (does not really work in docker/distribution servers, which incorrectly require the "delete" action in the token's scope)
locationQuery := uploadLocation.Query()
@@ -469,17 +481,17 @@ func (d *dockerImageDestination) PutSignatures(ctx context.Context, signatures [
}
switch {
case d.c.signatureBase != nil:
- return d.putSignaturesToLookaside(signatures, instanceDigest)
+ return d.putSignaturesToLookaside(signatures, *instanceDigest)
case d.c.supportsSignatures:
- return d.putSignaturesToAPIExtension(ctx, signatures, instanceDigest)
+ return d.putSignaturesToAPIExtension(ctx, signatures, *instanceDigest)
default:
return errors.Errorf("X-Registry-Supports-Signatures extension not supported, and lookaside is not configured")
}
}
// putSignaturesToLookaside implements PutSignatures() from the lookaside location configured in s.c.signatureBase,
-// which is not nil.
-func (d *dockerImageDestination) putSignaturesToLookaside(signatures [][]byte, instanceDigest *digest.Digest) error {
+// which is not nil, for a manifest with manifestDigest.
+func (d *dockerImageDestination) putSignaturesToLookaside(signatures [][]byte, manifestDigest digest.Digest) error {
// FIXME? This overwrites files one at a time, definitely not atomic.
// A failure when updating signatures with a reordered copy could lose some of them.
@@ -490,7 +502,7 @@ func (d *dockerImageDestination) putSignaturesToLookaside(signatures [][]byte, i
// NOTE: Keep this in sync with docs/signature-protocols.md!
for i, signature := range signatures {
- url := signatureStorageURL(d.c.signatureBase, *instanceDigest, i)
+ url := signatureStorageURL(d.c.signatureBase, manifestDigest, i)
if url == nil {
return errors.Errorf("Internal error: signatureStorageURL with non-nil base returned nil")
}
@@ -505,7 +517,7 @@ func (d *dockerImageDestination) putSignaturesToLookaside(signatures [][]byte, i
// is enough for dockerImageSource to stop looking for other signatures, so that
// is sufficient.
for i := len(signatures); ; i++ {
- url := signatureStorageURL(d.c.signatureBase, *instanceDigest, i)
+ url := signatureStorageURL(d.c.signatureBase, manifestDigest, i)
if url == nil {
return errors.Errorf("Internal error: signatureStorageURL with non-nil base returned nil")
}
@@ -564,8 +576,9 @@ func (c *dockerClient) deleteOneSignature(url *url.URL) (missing bool, err error
}
}
-// putSignaturesToAPIExtension implements PutSignatures() using the X-Registry-Supports-Signatures API extension.
-func (d *dockerImageDestination) putSignaturesToAPIExtension(ctx context.Context, signatures [][]byte, instanceDigest *digest.Digest) error {
+// putSignaturesToAPIExtension implements PutSignatures() using the X-Registry-Supports-Signatures API extension,
+// for a manifest with manifestDigest.
+func (d *dockerImageDestination) putSignaturesToAPIExtension(ctx context.Context, signatures [][]byte, manifestDigest digest.Digest) error {
// Skip dealing with the manifest digest, or reading the old state, if not necessary.
if len(signatures) == 0 {
return nil
@@ -575,7 +588,7 @@ func (d *dockerImageDestination) putSignaturesToAPIExtension(ctx context.Context
// always adds signatures. Eventually we should also allow removing signatures,
// but the X-Registry-Supports-Signatures API extension does not support that yet.
- existingSignatures, err := d.c.getExtensionsSignatures(ctx, d.ref, *instanceDigest)
+ existingSignatures, err := d.c.getExtensionsSignatures(ctx, d.ref, manifestDigest)
if err != nil {
return err
}
@@ -600,7 +613,7 @@ sigExists:
if err != nil || n != 16 {
return errors.Wrapf(err, "Error generating random signature len %d", n)
}
- signatureName = fmt.Sprintf("%s@%032x", instanceDigest.String(), randBytes)
+ signatureName = fmt.Sprintf("%s@%032x", manifestDigest.String(), randBytes)
if _, ok := existingSigNames[signatureName]; !ok {
break
}
@@ -616,7 +629,7 @@ sigExists:
return err
}
- path := fmt.Sprintf(extensionsSignaturePath, reference.Path(d.ref.ref), d.manifestDigest.String())
+ path := fmt.Sprintf(extensionsSignaturePath, reference.Path(d.ref.ref), manifestDigest.String())
res, err := d.c.makeRequest(ctx, "PUT", path, nil, bytes.NewReader(body), v2Auth, nil)
if err != nil {
return err
diff --git a/vendor/github.com/containers/image/v5/docker/docker_image_src.go b/vendor/github.com/containers/image/v5/docker/docker_image_src.go
index 9c0c20c64..10aff615e 100644
--- a/vendor/github.com/containers/image/v5/docker/docker_image_src.go
+++ b/vendor/github.com/containers/image/v5/docker/docker_image_src.go
@@ -24,8 +24,9 @@ import (
)
type dockerImageSource struct {
- ref dockerReference
- c *dockerClient
+ logicalRef dockerReference // The reference the user requested.
+ physicalRef dockerReference // The actual reference we are accessing (possibly a mirror)
+ c *dockerClient
// State
cachedManifest []byte // nil if not loaded yet
cachedManifestMIMEType string // Only valid if cachedManifest != nil
@@ -49,7 +50,6 @@ func newImageSource(ctx context.Context, sys *types.SystemContext, ref dockerRef
}
}
- primaryDomain := reference.Domain(ref.ref)
// Check all endpoints for the manifest availability. If we find one that does
// contain the image, it will be used for all future pull actions. Always try the
// non-mirror original location last; this both transparently handles the case
@@ -66,7 +66,7 @@ func newImageSource(ctx context.Context, sys *types.SystemContext, ref dockerRef
attempts := []attempt{}
for _, pullSource := range pullSources {
logrus.Debugf("Trying to access %q", pullSource.Reference)
- s, err := newImageSourceAttempt(ctx, sys, pullSource, primaryDomain)
+ s, err := newImageSourceAttempt(ctx, sys, ref, pullSource)
if err == nil {
return s, nil
}
@@ -95,32 +95,33 @@ func newImageSource(ctx context.Context, sys *types.SystemContext, ref dockerRef
}
// newImageSourceAttempt is an internal helper for newImageSource. Everyone else must call newImageSource.
-// Given a pullSource and primaryDomain, return a dockerImageSource if it is reachable.
+// Given a logicalReference and a pullSource, return a dockerImageSource if it is reachable.
// The caller must call .Close() on the returned ImageSource.
-func newImageSourceAttempt(ctx context.Context, sys *types.SystemContext, pullSource sysregistriesv2.PullSource, primaryDomain string) (*dockerImageSource, error) {
- ref, err := newReference(pullSource.Reference)
+func newImageSourceAttempt(ctx context.Context, sys *types.SystemContext, logicalRef dockerReference, pullSource sysregistriesv2.PullSource) (*dockerImageSource, error) {
+ physicalRef, err := newReference(pullSource.Reference)
if err != nil {
return nil, err
}
endpointSys := sys
// sys.DockerAuthConfig does not explicitly specify a registry; we must not blindly send the credentials intended for the primary endpoint to mirrors.
- if endpointSys != nil && endpointSys.DockerAuthConfig != nil && reference.Domain(ref.ref) != primaryDomain {
+ if endpointSys != nil && endpointSys.DockerAuthConfig != nil && reference.Domain(physicalRef.ref) != reference.Domain(logicalRef.ref) {
copy := *endpointSys
copy.DockerAuthConfig = nil
copy.DockerBearerRegistryToken = ""
endpointSys = &copy
}
- client, err := newDockerClientFromRef(endpointSys, ref, false, "pull")
+ client, err := newDockerClientFromRef(endpointSys, physicalRef, false, "pull")
if err != nil {
return nil, err
}
client.tlsClientConfig.InsecureSkipVerify = pullSource.Endpoint.Insecure
s := &dockerImageSource{
- ref: ref,
- c: client,
+ logicalRef: logicalRef,
+ physicalRef: physicalRef,
+ c: client,
}
if err := s.ensureManifestIsLoaded(ctx); err != nil {
@@ -132,7 +133,7 @@ func newImageSourceAttempt(ctx context.Context, sys *types.SystemContext, pullSo
// Reference returns the reference used to set up this source, _as specified by the user_
// (not as the image itself, or its underlying storage, claims). This can be used e.g. to determine which public keys are trusted for this image.
func (s *dockerImageSource) Reference() types.ImageReference {
- return s.ref
+ return s.logicalRef
}
// Close removes resources associated with an initialized ImageSource, if any.
@@ -181,7 +182,7 @@ func (s *dockerImageSource) GetManifest(ctx context.Context, instanceDigest *dig
}
func (s *dockerImageSource) fetchManifest(ctx context.Context, tagOrDigest string) ([]byte, string, error) {
- path := fmt.Sprintf(manifestPath, reference.Path(s.ref.ref), tagOrDigest)
+ path := fmt.Sprintf(manifestPath, reference.Path(s.physicalRef.ref), tagOrDigest)
headers := map[string][]string{
"Accept": manifest.DefaultRequestedManifestMIMETypes,
}
@@ -191,7 +192,7 @@ func (s *dockerImageSource) fetchManifest(ctx context.Context, tagOrDigest strin
}
defer res.Body.Close()
if res.StatusCode != http.StatusOK {
- return nil, "", errors.Wrapf(client.HandleErrorResponse(res), "Error reading manifest %s in %s", tagOrDigest, s.ref.ref.Name())
+ return nil, "", errors.Wrapf(client.HandleErrorResponse(res), "Error reading manifest %s in %s", tagOrDigest, s.physicalRef.ref.Name())
}
manblob, err := iolimits.ReadAtMost(res.Body, iolimits.MaxManifestBodySize)
@@ -213,7 +214,7 @@ func (s *dockerImageSource) ensureManifestIsLoaded(ctx context.Context) error {
return nil
}
- reference, err := s.ref.tagOrDigest()
+ reference, err := s.physicalRef.tagOrDigest()
if err != nil {
return err
}
@@ -271,7 +272,7 @@ func (s *dockerImageSource) GetBlob(ctx context.Context, info types.BlobInfo, ca
return s.getExternalBlob(ctx, info.URLs)
}
- path := fmt.Sprintf(blobsPath, reference.Path(s.ref.ref), info.Digest.String())
+ path := fmt.Sprintf(blobsPath, reference.Path(s.physicalRef.ref), info.Digest.String())
logrus.Debugf("Downloading %s", path)
res, err := s.c.makeRequest(ctx, "GET", path, nil, nil, v2Auth, nil)
if err != nil {
@@ -280,7 +281,7 @@ func (s *dockerImageSource) GetBlob(ctx context.Context, info types.BlobInfo, ca
if err := httpResponseToError(res, "Error fetching blob"); err != nil {
return nil, 0, err
}
- cache.RecordKnownLocation(s.ref.Transport(), bicTransportScope(s.ref), info.Digest, newBICLocationReference(s.ref))
+ cache.RecordKnownLocation(s.physicalRef.Transport(), bicTransportScope(s.physicalRef), info.Digest, newBICLocationReference(s.physicalRef))
return res.Body, getBlobSize(res), nil
}
@@ -308,7 +309,7 @@ func (s *dockerImageSource) manifestDigest(ctx context.Context, instanceDigest *
if instanceDigest != nil {
return *instanceDigest, nil
}
- if digested, ok := s.ref.ref.(reference.Digested); ok {
+ if digested, ok := s.physicalRef.ref.(reference.Digested); ok {
d := digested.Digest()
if d.Algorithm() == digest.Canonical {
return d, nil
@@ -398,7 +399,7 @@ func (s *dockerImageSource) getSignaturesFromAPIExtension(ctx context.Context, i
return nil, err
}
- parsedBody, err := s.c.getExtensionsSignatures(ctx, s.ref, manifestDigest)
+ parsedBody, err := s.c.getExtensionsSignatures(ctx, s.physicalRef, manifestDigest)
if err != nil {
return nil, err
}
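newImageSourceAttempt now compares the physical (possibly mirror) domain against the logical domain the user asked for, and drops explicitly supplied credentials when they differ, since sys.DockerAuthConfig does not name a registry. A sketch of that copy-and-clear step, using simplified stand-ins rather than the real types.SystemContext:

```go
package main

import "fmt"

// authConfig and systemContext are simplified stand-ins for the real types.
type authConfig struct{ Username, Password string }

type systemContext struct {
	DockerAuthConfig          *authConfig
	DockerBearerRegistryToken string
}

// scopeCredentials returns a context safe to use against physicalDomain: if the
// endpoint is a mirror on a different domain, the caller's credentials are not
// forwarded; the caller's context is never mutated.
func scopeCredentials(sys *systemContext, logicalDomain, physicalDomain string) *systemContext {
	if sys == nil || sys.DockerAuthConfig == nil || logicalDomain == physicalDomain {
		return sys
	}
	scoped := *sys // shallow copy, then clear the credential fields
	scoped.DockerAuthConfig = nil
	scoped.DockerBearerRegistryToken = ""
	return &scoped
}

func main() {
	sys := &systemContext{DockerAuthConfig: &authConfig{Username: "me", Password: "secret"}}
	mirror := scopeCredentials(sys, "registry.example.com", "mirror.example.net")
	fmt.Println(mirror.DockerAuthConfig == nil) // true: credentials are not sent to the mirror
}
```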
diff --git a/vendor/github.com/containers/image/v5/internal/pkg/platform/platform_matcher.go b/vendor/github.com/containers/image/v5/internal/pkg/platform/platform_matcher.go
index c23457642..c4d42f3eb 100644
--- a/vendor/github.com/containers/image/v5/internal/pkg/platform/platform_matcher.go
+++ b/vendor/github.com/containers/image/v5/internal/pkg/platform/platform_matcher.go
@@ -115,12 +115,23 @@ func getCPUVariant(os string, arch string) string {
return ""
}
+// compatibility contains, for a specified architecture, a list of known variants, in the
+// order from most capable (most restrictive) to least capable (most compatible).
+// Architectures that don’t have variants should not have an entry here.
var compatibility = map[string][]string{
- "arm": {"v7", "v6", "v5"},
+ "arm": {"v8", "v7", "v6", "v5"},
"arm64": {"v8"},
}
-// Returns all compatible platforms with the platform specifics possibly overriden by user,
+// baseVariants contains, for a specified architecture, a variant that is known to be
+// supported by _all_ machines using that architecture.
+// Architectures that don’t have variants, or where there are possible versions without
+// an established variant name, should not have an entry here.
+var baseVariants = map[string]string{
+ "arm64": "v8",
+}
+
+// WantedPlatforms returns all compatible platforms with the platform specifics possibly overridden by the user,
// the most compatible platform is first.
// If some option (arch, os, variant) is not present, a value from current platform is detected.
func WantedPlatforms(ctx *types.SystemContext) ([]imgspecv1.Platform, error) {
@@ -145,59 +156,45 @@ func WantedPlatforms(ctx *types.SystemContext) ([]imgspecv1.Platform, error) {
wantedOS = ctx.OSChoice
}
- var wantedPlatforms []imgspecv1.Platform
- if wantedVariant != "" && compatibility[wantedArch] != nil {
- wantedPlatforms = make([]imgspecv1.Platform, 0, len(compatibility[wantedArch]))
- wantedIndex := -1
- for i, v := range compatibility[wantedArch] {
- if wantedVariant == v {
- wantedIndex = i
- break
+ var variants []string = nil
+ if wantedVariant != "" {
+ if compatibility[wantedArch] != nil {
+ variantOrder := compatibility[wantedArch]
+ for i, v := range variantOrder {
+ if wantedVariant == v {
+ variants = variantOrder[i:]
+ break
+ }
}
}
- // user wants a variant which we know nothing about - not even compatibility
- if wantedIndex == -1 {
- wantedPlatforms = []imgspecv1.Platform{
- {
- OS: wantedOS,
- Architecture: wantedArch,
- Variant: wantedVariant,
- },
- }
- } else {
- for i := wantedIndex; i < len(compatibility[wantedArch]); i++ {
- v := compatibility[wantedArch][i]
- wantedPlatforms = append(wantedPlatforms, imgspecv1.Platform{
- OS: wantedOS,
- Architecture: wantedArch,
- Variant: v,
- })
- }
+ if variants == nil {
+ // user wants a variant which we know nothing about - not even compatibility
+ variants = []string{wantedVariant}
}
+ variants = append(variants, "")
} else {
- wantedPlatforms = []imgspecv1.Platform{
- {
- OS: wantedOS,
- Architecture: wantedArch,
- Variant: wantedVariant,
- },
+ variants = append(variants, "") // No variant specified, use a “no variant specified” image if present
+ if baseVariant, ok := baseVariants[wantedArch]; ok {
+ // But also accept an image with the “base” variant for the architecture, if it exists.
+ variants = append(variants, baseVariant)
}
}
- return wantedPlatforms, nil
+ res := make([]imgspecv1.Platform, 0, len(variants))
+ for _, v := range variants {
+ res = append(res, imgspecv1.Platform{
+ OS: wantedOS,
+ Architecture: wantedArch,
+ Variant: v,
+ })
+ }
+ return res, nil
}
+// MatchesPlatform returns true if a platform descriptor from a multi-arch image matches
+// an item from the return value of WantedPlatforms.
func MatchesPlatform(image imgspecv1.Platform, wanted imgspecv1.Platform) bool {
- if image.Architecture != wanted.Architecture {
- return false
- }
- if image.OS != wanted.OS {
- return false
- }
-
- if wanted.Variant == "" || image.Variant == wanted.Variant {
- return true
- }
-
- return false
+ return image.Architecture == wanted.Architecture &&
+ image.OS == wanted.OS &&
+ image.Variant == wanted.Variant
}
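The rewrite above turns variant handling into an ordered list: WantedPlatforms now returns the requested variant followed by every more-compatible variant and finally the "" entry (or, with no variant requested, "" plus the architecture's base variant), while MatchesPlatform requires an exact OS/architecture/variant match. A self-contained sketch of how a caller walks that ordered list (the real functions live in an internal package, so the types here are simplified stand-ins):

```go
package main

import "fmt"

// platform is a simplified stand-in for imgspecv1.Platform.
type platform struct{ OS, Architecture, Variant string }

// choose mirrors the selection loop in ChooseInstance: wanted platforms come
// ordered from most to least specific, and a candidate only matches when OS,
// architecture and variant are all exactly equal (variant "" only matches "").
func choose(wanted, available []platform) (platform, error) {
	for _, w := range wanted {
		for _, a := range available {
			if a.OS == w.OS && a.Architecture == w.Architecture && a.Variant == w.Variant {
				return a, nil
			}
		}
	}
	return platform{}, fmt.Errorf("no image found for %+v", wanted[0])
}

func main() {
	// e.g. an arm/v7 machine accepts v7, v6, v5 and finally an unmarked arm image, in that order.
	wanted := []platform{
		{"linux", "arm", "v7"}, {"linux", "arm", "v6"}, {"linux", "arm", "v5"}, {"linux", "arm", ""},
	}
	available := []platform{{"linux", "amd64", ""}, {"linux", "arm", "v6"}}
	got, _ := choose(wanted, available)
	fmt.Println(got) // {linux arm v6}
}
```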
diff --git a/vendor/github.com/containers/image/v5/internal/uploadreader/upload_reader.go b/vendor/github.com/containers/image/v5/internal/uploadreader/upload_reader.go
new file mode 100644
index 000000000..6aa9ead68
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/internal/uploadreader/upload_reader.go
@@ -0,0 +1,61 @@
+package uploadreader
+
+import (
+ "io"
+ "sync"
+)
+
+// UploadReader is a pass-through reader for use in sending non-trivial data using the net/http
+// package (http.NewRequest, http.Post and the like).
+//
+// The net/http package uses a separate goroutine to upload data to a HTTP connection,
+// and it is possible for the server to return a response (typically an error) before consuming
+// the full body of the request. In that case http.Client.Do can return with an error while
+// the body is still being read, regardless of the cancellation, if any, of http.Request.Context().
+//
+// As a result, any data used/updated by the io.Reader() provided as the request body may be
+// used/updated even after http.Client.Do returns, causing races.
+//
+// To fix this, UploadReader provides a synchronized Terminate() method, which can block for
+// a not-completely-negligible time (for a duration of the underlying Read()), but guarantees that
+// after Terminate() returns, the underlying reader is never used any more (unlike calling
+// the cancellation callback of context.WithCancel, which returns before any recipients may have
+// reacted to the cancellation).
+type UploadReader struct {
+ mutex sync.Mutex
+ // The following members can only be used with mutex held
+ reader io.Reader
+ terminationError error // nil if not terminated yet
+}
+
+// NewUploadReader returns an UploadReader for an "underlying" reader.
+func NewUploadReader(underlying io.Reader) *UploadReader {
+ return &UploadReader{
+ reader: underlying,
+ terminationError: nil,
+ }
+}
+
+// Read returns the error set by Terminate, if any, or calls the underlying reader.
+// It is safe to call this from a different goroutine than Terminate.
+func (ur *UploadReader) Read(p []byte) (int, error) {
+ ur.mutex.Lock()
+ defer ur.mutex.Unlock()
+
+ if ur.terminationError != nil {
+ return 0, ur.terminationError
+ }
+ return ur.reader.Read(p)
+}
+
+// Terminate waits for in-progress Read calls, if any, to finish, and ensures that after
+// this function returns, any Read calls will fail with the provided error, and the underlying
+// reader will never be used any more.
+//
+// It is safe to call this from a different goroutine than Read.
+func (ur *UploadReader) Terminate(err error) {
+ ur.mutex.Lock() // May block for some time if ur.reader.Read() is in progress
+ defer ur.mutex.Unlock()
+
+ ur.terminationError = err
+}
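A typical use of the new package follows the pattern in docker_image_dest.go above: wrap the request body in an UploadReader and defer Terminate, so that once the calling function returns, net/http's background writer goroutine can no longer read from the original reader. The sketch below assumes the import path is reachable; since the package is internal to containers/image, this exact import only works from within that module and is shown for illustration.

```go
package main

import (
	"errors"
	"io"
	"net/http"

	"github.com/containers/image/v5/internal/uploadreader"
)

// uploadChunk shows the intended calling pattern (compare the PATCH request in
// docker_image_dest.go).
func uploadChunk(client *http.Client, url string, body io.Reader) error {
	ur := uploadreader.NewUploadReader(body)
	// The error text is never user-visible; it only stops any late Read calls.
	defer ur.Terminate(errors.New("reading data from an already terminated upload"))

	req, err := http.NewRequest(http.MethodPatch, url, ur)
	if err != nil {
		return err
	}
	req.Header.Set("Content-Type", "application/octet-stream")
	res, err := client.Do(req)
	if err != nil {
		return err
	}
	defer res.Body.Close()
	if res.StatusCode < 200 || res.StatusCode >= 300 {
		return errors.New("unexpected status " + res.Status)
	}
	return nil
}
```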
diff --git a/vendor/github.com/containers/image/v5/manifest/common.go b/vendor/github.com/containers/image/v5/manifest/common.go
new file mode 100644
index 000000000..fa2b39e0e
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/manifest/common.go
@@ -0,0 +1,118 @@
+package manifest
+
+import (
+ "fmt"
+
+ "github.com/containers/image/v5/pkg/compression"
+ "github.com/containers/image/v5/types"
+ "github.com/pkg/errors"
+ "github.com/sirupsen/logrus"
+)
+
+// dupStringSlice returns a deep copy of a slice of strings, or nil if the
+// source slice is empty.
+func dupStringSlice(list []string) []string {
+ if len(list) == 0 {
+ return nil
+ }
+ dup := make([]string, len(list))
+ copy(dup, list)
+ return dup
+}
+
+// dupStringStringMap returns a deep copy of a map[string]string, or nil if the
+// passed-in map is nil or has no keys.
+func dupStringStringMap(m map[string]string) map[string]string {
+ if len(m) == 0 {
+ return nil
+ }
+ result := make(map[string]string)
+ for k, v := range m {
+ result[k] = v
+ }
+ return result
+}
+
+// layerInfosToStrings converts a list of layer infos, presumably obtained from a Manifest.LayerInfos()
+// method call, into a format suitable for inclusion in a types.ImageInspectInfo structure.
+func layerInfosToStrings(infos []LayerInfo) []string {
+ layers := make([]string, len(infos))
+ for i, info := range infos {
+ layers[i] = info.Digest.String()
+ }
+ return layers
+}
+
+// compressionMIMETypeSet describes a set of MIME type “variants” that represent differently-compressed
+// versions of “the same kind of content”.
+// The map key is the return value of compression.Algorithm.Name(), or mtsUncompressed;
+// the map value is a MIME type, or mtsUnsupportedMIMEType to mean "recognized but unsupported".
+type compressionMIMETypeSet map[string]string
+
+const mtsUncompressed = "" // A key in compressionMIMETypeSet for the uncompressed variant
+const mtsUnsupportedMIMEType = "" // A value in compressionMIMETypeSet that means “recognized but unsupported”
+
+// compressionVariantMIMEType returns a variant of mimeType for the specified algorithm (which may be nil
+// to mean "no compression"), based on variantTable.
+func compressionVariantMIMEType(variantTable []compressionMIMETypeSet, mimeType string, algorithm *compression.Algorithm) (string, error) {
+ if mimeType == mtsUnsupportedMIMEType { // Prevent matching against the {algo:mtsUnsupportedMIMEType} entries
+ return "", fmt.Errorf("cannot update unknown MIME type")
+ }
+ for _, variants := range variantTable {
+ for _, mt := range variants {
+ if mt == mimeType { // Found the variant
+ name := mtsUncompressed
+ if algorithm != nil {
+ name = algorithm.Name()
+ }
+ if res, ok := variants[name]; ok {
+ if res != mtsUnsupportedMIMEType {
+ return res, nil
+ }
+ if name != mtsUncompressed {
+ return "", fmt.Errorf("%s compression is not supported", name)
+ }
+ return "", errors.New("uncompressed variant is not supported")
+ }
+ if name != mtsUncompressed {
+ return "", fmt.Errorf("unknown compression algorithm %s", name)
+ }
+ // We can't very well say “the idea of no compression is unknown”
+ return "", errors.New("uncompressed variant is not supported")
+ }
+ }
+ }
+ if algorithm != nil {
+ return "", fmt.Errorf("unsupported MIME type for compression: %s", mimeType)
+ }
+ return "", fmt.Errorf("unsupported MIME type for decompression: %s", mimeType)
+}
+
+// updatedMIMEType returns the result of applying edits in updated (MediaType, CompressionOperation) to
+// mimeType, based on variantTable. It may use updated.Digest for error messages.
+func updatedMIMEType(variantTable []compressionMIMETypeSet, mimeType string, updated types.BlobInfo) (string, error) {
+ // Note that manifests in containers-storage might be reporting the
+ // wrong media type since the original manifests are stored while layers
+ // are decompressed in storage. Hence, we need to consider the case
+ // that an already {de}compressed layer should be {de}compressed;
+ // compressionVariantMIMEType does that by not caring whether the original is
+ // {de}compressed.
+ switch updated.CompressionOperation {
+ case types.PreserveOriginal:
+ // Keep the original media type.
+ return mimeType, nil
+
+ case types.Decompress:
+ return compressionVariantMIMEType(variantTable, mimeType, nil)
+
+ case types.Compress:
+ if updated.CompressionAlgorithm == nil {
+ logrus.Debugf("Error preparing updated manifest: blob %q was compressed but does not specify by which algorithm: falling back to use the original blob", updated.Digest)
+ return mimeType, nil
+ }
+ return compressionVariantMIMEType(variantTable, mimeType, updated.CompressionAlgorithm)
+
+ default:
+ return "", fmt.Errorf("unknown compression operation (%d)", updated.CompressionOperation)
+ }
+}
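The new compressionMIMETypeSet tables drive both the schema2 and OCI updates that follow: find the set containing the layer's current MIME type, then look up the entry for the requested compression algorithm ("" meaning uncompressed). A miniature, happy-path-only sketch of that two-step lookup using real OCI media type strings (the real code also tracks "recognized but unsupported" entries):

```go
package main

import "fmt"

// ociLayerVariants is a miniature version of one compressionMIMETypeSet: all MIME
// "variants" of one kind of layer, keyed by compression algorithm name
// ("" = uncompressed).
var ociLayerVariants = map[string]string{
	"":     "application/vnd.oci.image.layer.v1.tar",
	"gzip": "application/vnd.oci.image.layer.v1.tar+gzip",
	"zstd": "application/vnd.oci.image.layer.v1.tar+zstd",
}

// variantFor first checks that currentMIME belongs to the set, then returns the
// variant for the requested algorithm.
func variantFor(table map[string]string, currentMIME, algo string) (string, error) {
	found := false
	for _, mt := range table {
		if mt == currentMIME {
			found = true
			break
		}
	}
	if !found {
		return "", fmt.Errorf("unsupported MIME type %q", currentMIME)
	}
	res, ok := table[algo]
	if !ok {
		return "", fmt.Errorf("unknown compression algorithm %q", algo)
	}
	return res, nil
}

func main() {
	// Recompressing a gzip OCI layer with zstd:
	mt, _ := variantFor(ociLayerVariants, "application/vnd.oci.image.layer.v1.tar+gzip", "zstd")
	fmt.Println(mt) // application/vnd.oci.image.layer.v1.tar+zstd
}
```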
diff --git a/vendor/github.com/containers/image/v5/manifest/docker_schema2.go b/vendor/github.com/containers/image/v5/manifest/docker_schema2.go
index ff0780fe3..8d8bb9e01 100644
--- a/vendor/github.com/containers/image/v5/manifest/docker_schema2.go
+++ b/vendor/github.com/containers/image/v5/manifest/docker_schema2.go
@@ -10,7 +10,6 @@ import (
"github.com/containers/image/v5/types"
"github.com/opencontainers/go-digest"
"github.com/pkg/errors"
- "github.com/sirupsen/logrus"
)
// Schema2Descriptor is a “descriptor” in docker/distribution schema 2.
@@ -213,26 +212,17 @@ func (m *Schema2) LayerInfos() []LayerInfo {
return blobs
}
-// isSchema2ForeignLayer is a convenience wrapper to check if a given mime type
-// is a compressed or decompressed schema 2 foreign layer.
-func isSchema2ForeignLayer(mimeType string) bool {
- switch mimeType {
- case DockerV2Schema2ForeignLayerMediaType, DockerV2Schema2ForeignLayerMediaTypeGzip:
- return true
- default:
- return false
- }
-}
-
-// isSchema2Layer is a convenience wrapper to check if a given mime type is a
-// compressed or decompressed schema 2 layer.
-func isSchema2Layer(mimeType string) bool {
- switch mimeType {
- case DockerV2SchemaLayerMediaTypeUncompressed, DockerV2Schema2LayerMediaType:
- return true
- default:
- return false
- }
+var schema2CompressionMIMETypeSets = []compressionMIMETypeSet{
+ {
+ mtsUncompressed: DockerV2Schema2ForeignLayerMediaType,
+ compression.Gzip.Name(): DockerV2Schema2ForeignLayerMediaTypeGzip,
+ compression.Zstd.Name(): mtsUnsupportedMIMEType,
+ },
+ {
+ mtsUncompressed: DockerV2SchemaLayerMediaTypeUncompressed,
+ compression.Gzip.Name(): DockerV2Schema2LayerMediaType,
+ compression.Zstd.Name(): mtsUnsupportedMIMEType,
+ },
}
// UpdateLayerInfos replaces the original layers with the specified BlobInfos (size+digest+urls), in order (the root layer first, and then successive layered layers)
@@ -243,67 +233,16 @@ func (m *Schema2) UpdateLayerInfos(layerInfos []types.BlobInfo) error {
original := m.LayersDescriptors
m.LayersDescriptors = make([]Schema2Descriptor, len(layerInfos))
for i, info := range layerInfos {
+ mimeType := original[i].MediaType
// First make sure we support the media type of the original layer.
- if err := SupportedSchema2MediaType(original[i].MediaType); err != nil {
- return fmt.Errorf("Error preparing updated manifest: unknown media type of original layer: %q", original[i].MediaType)
+ if err := SupportedSchema2MediaType(mimeType); err != nil {
+ return fmt.Errorf("Error preparing updated manifest: unknown media type of original layer %q: %q", info.Digest, mimeType)
}
-
- // Set the correct media types based on the specified compression
- // operation, the desired compression algorithm AND the original media
- // type.
- //
- // Note that manifests in containers-storage might be reporting the
- // wrong media type since the original manifests are stored while layers
- // are decompressed in storage. Hence, we need to consider the case
- // that an already {de}compressed layer should be {de}compressed, which
- // is being addressed in `isSchema2{Foreign}Layer`.
- switch info.CompressionOperation {
- case types.PreserveOriginal:
- // Keep the original media type.
- m.LayersDescriptors[i].MediaType = original[i].MediaType
-
- case types.Decompress:
- // Decompress the original media type and check if it was
- // non-distributable one or not.
- mimeType := original[i].MediaType
- switch {
- case isSchema2ForeignLayer(mimeType):
- m.LayersDescriptors[i].MediaType = DockerV2Schema2ForeignLayerMediaType
- case isSchema2Layer(mimeType):
- m.LayersDescriptors[i].MediaType = DockerV2SchemaLayerMediaTypeUncompressed
- default:
- return fmt.Errorf("Error preparing updated manifest: unsupported media type for decompression: %q", original[i].MediaType)
- }
-
- case types.Compress:
- if info.CompressionAlgorithm == nil {
- logrus.Debugf("Preparing updated manifest: blob %q was compressed but does not specify by which algorithm: falling back to use the original blob", info.Digest)
- m.LayersDescriptors[i].MediaType = original[i].MediaType
- break
- }
- // Compress the original media type and set the new one based on
- // that type (distributable or not) and the specified compression
- // algorithm. Throw an error if the algorithm is not supported.
- switch info.CompressionAlgorithm.Name() {
- case compression.Gzip.Name():
- mimeType := original[i].MediaType
- switch {
- case isSchema2ForeignLayer(mimeType):
- m.LayersDescriptors[i].MediaType = DockerV2Schema2ForeignLayerMediaTypeGzip
- case isSchema2Layer(mimeType):
- m.LayersDescriptors[i].MediaType = DockerV2Schema2LayerMediaType
- default:
- return fmt.Errorf("Error preparing updated manifest: unsupported media type for compression: %q", original[i].MediaType)
- }
- case compression.Zstd.Name():
- return fmt.Errorf("Error preparing updated manifest: zstd compression is not supported for docker images")
- default:
- return fmt.Errorf("Error preparing updated manifest: unknown compression algorithm %q for layer %q", info.CompressionAlgorithm.Name(), info.Digest)
- }
-
- default:
- return fmt.Errorf("Error preparing updated manifest: unknown compression operation (%d) for layer %q", info.CompressionOperation, info.Digest)
+ mimeType, err := updatedMIMEType(schema2CompressionMIMETypeSets, mimeType, info)
+ if err != nil {
+ return errors.Wrapf(err, "Error preparing updated manifest, layer %q", info.Digest)
}
+ m.LayersDescriptors[i].MediaType = mimeType
m.LayersDescriptors[i].Digest = info.Digest
m.LayersDescriptors[i].Size = info.Size
m.LayersDescriptors[i].URLs = info.URLs
diff --git a/vendor/github.com/containers/image/v5/manifest/docker_schema2_list.go b/vendor/github.com/containers/image/v5/manifest/docker_schema2_list.go
index 5f96a981a..bfedff69c 100644
--- a/vendor/github.com/containers/image/v5/manifest/docker_schema2_list.go
+++ b/vendor/github.com/containers/image/v5/manifest/docker_schema2_list.go
@@ -107,7 +107,7 @@ func (list *Schema2List) ChooseInstance(ctx *types.SystemContext) (digest.Digest
}
}
}
- return "", fmt.Errorf("no image found in manifest list for architecture %s, variant %s, OS %s", wantedPlatforms[0].Architecture, wantedPlatforms[0].Variant, wantedPlatforms[0].OS)
+ return "", fmt.Errorf("no image found in manifest list for architecture %s, variant %q, OS %s", wantedPlatforms[0].Architecture, wantedPlatforms[0].Variant, wantedPlatforms[0].OS)
}
// Serialize returns the list in a blob format.
diff --git a/vendor/github.com/containers/image/v5/manifest/list.go b/vendor/github.com/containers/image/v5/manifest/list.go
index c7d741dc2..58982597e 100644
--- a/vendor/github.com/containers/image/v5/manifest/list.go
+++ b/vendor/github.com/containers/image/v5/manifest/list.go
@@ -59,30 +59,6 @@ type ListUpdate struct {
MediaType string
}
-// dupStringSlice returns a deep copy of a slice of strings, or nil if the
-// source slice is empty.
-func dupStringSlice(list []string) []string {
- if len(list) == 0 {
- return nil
- }
- dup := make([]string, len(list))
- copy(dup, list)
- return dup
-}
-
-// dupStringStringMap returns a deep copy of a map[string]string, or nil if the
-// passed-in map is nil or has no keys.
-func dupStringStringMap(m map[string]string) map[string]string {
- if len(m) == 0 {
- return nil
- }
- result := make(map[string]string)
- for k, v := range m {
- result[k] = v
- }
- return result
-}
-
// ListFromBlob parses a list of manifests.
func ListFromBlob(manifest []byte, manifestMIMEType string) (List, error) {
normalized := NormalizedMIMEType(manifestMIMEType)
diff --git a/vendor/github.com/containers/image/v5/manifest/manifest.go b/vendor/github.com/containers/image/v5/manifest/manifest.go
index 033b8d951..7b0758873 100644
--- a/vendor/github.com/containers/image/v5/manifest/manifest.go
+++ b/vendor/github.com/containers/image/v5/manifest/manifest.go
@@ -256,13 +256,3 @@ func FromBlob(manblob []byte, mt string) (Manifest, error) {
// Note that this may not be reachable, NormalizedMIMEType has a default for unknown values.
return nil, fmt.Errorf("Unimplemented manifest MIME type %s (normalized as %s)", mt, nmt)
}
-
-// layerInfosToStrings converts a list of layer infos, presumably obtained from a Manifest.LayerInfos()
-// method call, into a format suitable for inclusion in a types.ImageInspectInfo structure.
-func layerInfosToStrings(infos []LayerInfo) []string {
- layers := make([]string, len(infos))
- for i, info := range infos {
- layers[i] = info.Digest.String()
- }
- return layers
-}
diff --git a/vendor/github.com/containers/image/v5/manifest/oci.go b/vendor/github.com/containers/image/v5/manifest/oci.go
index aafe6693b..40c40dee8 100644
--- a/vendor/github.com/containers/image/v5/manifest/oci.go
+++ b/vendor/github.com/containers/image/v5/manifest/oci.go
@@ -12,7 +12,6 @@ import (
"github.com/opencontainers/image-spec/specs-go"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
- "github.com/sirupsen/logrus"
)
// BlobInfoFromOCI1Descriptor returns a types.BlobInfo based on the input OCI1 descriptor.
@@ -95,26 +94,17 @@ func (m *OCI1) LayerInfos() []LayerInfo {
return blobs
}
-// isOCI1NonDistributableLayer is a convenience wrapper to check if a given mime
-// type is a compressed or decompressed OCI v1 non-distributable layer.
-func isOCI1NonDistributableLayer(mimeType string) bool {
- switch mimeType {
- case imgspecv1.MediaTypeImageLayerNonDistributable, imgspecv1.MediaTypeImageLayerNonDistributableGzip, imgspecv1.MediaTypeImageLayerNonDistributableZstd:
- return true
- default:
- return false
- }
-}
-
-// isOCI1Layer is a convenience wrapper to check if a given mime type is a
-// compressed or decompressed OCI v1 layer.
-func isOCI1Layer(mimeType string) bool {
- switch mimeType {
- case imgspecv1.MediaTypeImageLayer, imgspecv1.MediaTypeImageLayerGzip, imgspecv1.MediaTypeImageLayerZstd:
- return true
- default:
- return false
- }
+var oci1CompressionMIMETypeSets = []compressionMIMETypeSet{
+ {
+ mtsUncompressed: imgspecv1.MediaTypeImageLayerNonDistributable,
+ compression.Gzip.Name(): imgspecv1.MediaTypeImageLayerNonDistributableGzip,
+ compression.Zstd.Name(): imgspecv1.MediaTypeImageLayerNonDistributableZstd,
+ },
+ {
+ mtsUncompressed: imgspecv1.MediaTypeImageLayer,
+ compression.Gzip.Name(): imgspecv1.MediaTypeImageLayerGzip,
+ compression.Zstd.Name(): imgspecv1.MediaTypeImageLayerZstd,
+ },
}
// UpdateLayerInfos replaces the original layers with the specified BlobInfos (size+digest+urls+mediatype), in order (the root layer first, and then successive layered layers)
@@ -133,79 +123,19 @@ func (m *OCI1) UpdateLayerInfos(layerInfos []types.BlobInfo) error {
}
mimeType = decMimeType
}
-
- // Set the correct media types based on the specified compression
- // operation, the desired compression algorithm AND the original media
- // type.
- //
- // Note that manifests in containers-storage might be reporting the
- // wrong media type since the original manifests are stored while layers
- // are decompressed in storage. Hence, we need to consider the case
- // that an already {de}compressed layer should be {de}compressed, which
- // is being addressed in `isSchema2{Foreign}Layer`.
- switch info.CompressionOperation {
- case types.PreserveOriginal:
- // Keep the original media type.
- m.Layers[i].MediaType = mimeType
-
- case types.Decompress:
- // Decompress the original media type and check if it was
- // non-distributable one or not.
- switch {
- case isOCI1NonDistributableLayer(mimeType):
- m.Layers[i].MediaType = imgspecv1.MediaTypeImageLayerNonDistributable
- case isOCI1Layer(mimeType):
- m.Layers[i].MediaType = imgspecv1.MediaTypeImageLayer
- default:
- return fmt.Errorf("Error preparing updated manifest: unsupported media type for decompression: %q", mimeType)
- }
-
- case types.Compress:
- if info.CompressionAlgorithm == nil {
- logrus.Debugf("Error preparing updated manifest: blob %q was compressed but does not specify by which algorithm: falling back to use the original blob", info.Digest)
- m.Layers[i].MediaType = mimeType
- break
- }
- // Compress the original media type and set the new one based on
- // that type (distributable or not) and the specified compression
- // algorithm. Throw an error if the algorithm is not supported.
- switch info.CompressionAlgorithm.Name() {
- case compression.Gzip.Name():
- switch {
- case isOCI1NonDistributableLayer(mimeType):
- m.Layers[i].MediaType = imgspecv1.MediaTypeImageLayerNonDistributableGzip
- case isOCI1Layer(mimeType):
- m.Layers[i].MediaType = imgspecv1.MediaTypeImageLayerGzip
- default:
- return fmt.Errorf("Error preparing updated manifest: unsupported media type for compression: %q", mimeType)
- }
-
- case compression.Zstd.Name():
- switch {
- case isOCI1NonDistributableLayer(mimeType):
- m.Layers[i].MediaType = imgspecv1.MediaTypeImageLayerNonDistributableZstd
- case isOCI1Layer(mimeType):
- m.Layers[i].MediaType = imgspecv1.MediaTypeImageLayerZstd
- default:
- return fmt.Errorf("Error preparing updated manifest: unsupported media type for compression: %q", mimeType)
- }
-
- default:
- return fmt.Errorf("Error preparing updated manifest: unknown compression algorithm %q for layer %q", info.CompressionAlgorithm.Name(), info.Digest)
- }
-
- default:
- return fmt.Errorf("Error preparing updated manifest: unknown compression operation (%d) for layer %q", info.CompressionOperation, info.Digest)
+ mimeType, err := updatedMIMEType(oci1CompressionMIMETypeSets, mimeType, info)
+ if err != nil {
+ return errors.Wrapf(err, "Error preparing updated manifest, layer %q", info.Digest)
}
-
if info.CryptoOperation == types.Encrypt {
- encMediaType, err := getEncryptedMediaType(m.Layers[i].MediaType)
+ encMediaType, err := getEncryptedMediaType(mimeType)
if err != nil {
- return fmt.Errorf("error preparing updated manifest: encryption specified but no counterpart for mediatype: %q", m.Layers[i].MediaType)
+ return fmt.Errorf("error preparing updated manifest: encryption specified but no counterpart for mediatype: %q", mimeType)
}
- m.Layers[i].MediaType = encMediaType
+ mimeType = encMediaType
}
+ m.Layers[i].MediaType = mimeType
m.Layers[i].Digest = info.Digest
m.Layers[i].Size = info.Size
m.Layers[i].Annotations = info.Annotations
diff --git a/vendor/github.com/containers/image/v5/manifest/oci_index.go b/vendor/github.com/containers/image/v5/manifest/oci_index.go
index 18cc8135c..7bdea8fb2 100644
--- a/vendor/github.com/containers/image/v5/manifest/oci_index.go
+++ b/vendor/github.com/containers/image/v5/manifest/oci_index.go
@@ -79,6 +79,9 @@ func (index *OCI1Index) ChooseInstance(ctx *types.SystemContext) (digest.Digest,
}
for _, wantedPlatform := range wantedPlatforms {
for _, d := range index.Manifests {
+ if d.Platform == nil {
+ continue
+ }
imagePlatform := imgspecv1.Platform{
Architecture: d.Platform.Architecture,
OS: d.Platform.OS,
@@ -97,7 +100,7 @@ func (index *OCI1Index) ChooseInstance(ctx *types.SystemContext) (digest.Digest,
return d.Digest, nil
}
}
- return "", fmt.Errorf("no image found in image index for architecture %s, variant %s, OS %s", wantedPlatforms[0].Architecture, wantedPlatforms[0].Variant, wantedPlatforms[0].OS)
+ return "", fmt.Errorf("no image found in image index for architecture %s, variant %q, OS %s", wantedPlatforms[0].Architecture, wantedPlatforms[0].Variant, wantedPlatforms[0].OS)
}
// Serialize returns the index in a blob format.
diff --git a/vendor/github.com/containers/image/v5/version/version.go b/vendor/github.com/containers/image/v5/version/version.go
index 717c2b1b3..67f57e03e 100644
--- a/vendor/github.com/containers/image/v5/version/version.go
+++ b/vendor/github.com/containers/image/v5/version/version.go
@@ -8,7 +8,7 @@ const (
// VersionMinor is for functionality in a backwards-compatible manner
VersionMinor = 4
// VersionPatch is for backwards-compatible bug fixes
- VersionPatch = 3
+ VersionPatch = 4
// VersionDev indicates development branch. Releases will be empty string.
VersionDev = ""
diff --git a/vendor/github.com/vbauerster/mpb/v5/bar.go b/vendor/github.com/vbauerster/mpb/v5/bar.go
index 1a4c66fe1..13bda2247 100644
--- a/vendor/github.com/vbauerster/mpb/v5/bar.go
+++ b/vendor/github.com/vbauerster/mpb/v5/bar.go
@@ -69,6 +69,7 @@ type bState struct {
trimSpace bool
toComplete bool
completeFlushed bool
+ ignoreComplete bool
noPop bool
aDecorators []decor.Decorator
pDecorators []decor.Decorator
@@ -170,17 +171,18 @@ func (b *Bar) TraverseDecorators(cb func(decor.Decorator)) {
}
// SetTotal sets total dynamically.
-// If total is less or equal to zero it takes progress' current value.
-// If complete is true, complete event will be triggered.
+// If total is less than or equal to zero it takes progress' current value.
+// The complete flag enables or disables the complete event on `current >= total`.
func (b *Bar) SetTotal(total int64, complete bool) {
select {
case b.operateState <- func(s *bState) {
+ s.ignoreComplete = !complete
if total <= 0 {
s.total = s.current
} else {
s.total = total
}
- if complete && !s.toComplete {
+ if !s.ignoreComplete && !s.toComplete {
s.current = s.total
s.toComplete = true
go b.refreshTillShutdown()
@@ -197,7 +199,7 @@ func (b *Bar) SetCurrent(current int64) {
s.iterated = true
s.lastN = current - s.current
s.current = current
- if s.total > 0 && s.current >= s.total {
+ if !s.ignoreComplete && s.current >= s.total {
s.current = s.total
s.toComplete = true
go b.refreshTillShutdown()
@@ -224,7 +226,7 @@ func (b *Bar) IncrInt64(n int64) {
s.iterated = true
s.lastN = n
s.current += n
- if s.total > 0 && s.current >= s.total {
+ if !s.ignoreComplete && s.current >= s.total {
s.current = s.total
s.toComplete = true
go b.refreshTillShutdown()
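The new ignoreComplete flag decouples "current reached total" from "the bar is complete": SetTotal(total, false) marks the total as provisional so the bar keeps running even if current overshoots it, and a later SetTotal(total, true) triggers the complete event. A small usage sketch, assuming the rest of the mpb v5 API is used as documented:

```go
package main

import "github.com/vbauerster/mpb/v5"

func main() {
	p := mpb.New()
	bar := p.AddBar(100)
	// Mark the total as provisional: reaching it will not complete the bar.
	bar.SetTotal(100, false)

	for i := 0; i < 150; i++ {
		bar.IncrInt64(1)
		if i == 99 {
			bar.SetTotal(200, false) // more work discovered; grow the total mid-flight
		}
	}
	bar.SetTotal(200, true) // all done: trigger the complete event explicitly
	p.Wait()
}
```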
diff --git a/vendor/github.com/vbauerster/mpb/v5/bar_filler.go b/vendor/github.com/vbauerster/mpb/v5/bar_filler.go
index 00bf0a494..33dbf191d 100644
--- a/vendor/github.com/vbauerster/mpb/v5/bar_filler.go
+++ b/vendor/github.com/vbauerster/mpb/v5/bar_filler.go
@@ -76,7 +76,7 @@ func (s *barFiller) SetReverse(reverse bool) {
s.flush = reverseFlush
} else {
s.tip = s.format[rTip]
- s.flush = normalFlush
+ s.flush = regularFlush
}
s.reverse = reverse
}
@@ -125,7 +125,7 @@ func (s *barFiller) Fill(w io.Writer, width int, stat *decor.Statistics) {
s.flush(w, bb)
}
-func normalFlush(w io.Writer, bb [][]byte) {
+func regularFlush(w io.Writer, bb [][]byte) {
for i := 0; i < len(bb); i++ {
w.Write(bb[i])
}
diff --git a/vendor/github.com/vbauerster/mpb/v5/go.mod b/vendor/github.com/vbauerster/mpb/v5/go.mod
index 672191fc8..1d8d52934 100644
--- a/vendor/github.com/vbauerster/mpb/v5/go.mod
+++ b/vendor/github.com/vbauerster/mpb/v5/go.mod
@@ -3,8 +3,8 @@ module github.com/vbauerster/mpb/v5
require (
github.com/VividCortex/ewma v1.1.1
github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d
- golang.org/x/crypto v0.0.0-20200311171314-f7b00557c8c4
- golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527 // indirect
+ golang.org/x/crypto v0.0.0-20200423211502-4bdfaf469ed5
+ golang.org/x/sys v0.0.0-20200420163511-1957bb5e6d1f // indirect
)
go 1.14
diff --git a/vendor/github.com/vbauerster/mpb/v5/go.sum b/vendor/github.com/vbauerster/mpb/v5/go.sum
index 9a411976a..99ca1bf67 100644
--- a/vendor/github.com/vbauerster/mpb/v5/go.sum
+++ b/vendor/github.com/vbauerster/mpb/v5/go.sum
@@ -3,11 +3,11 @@ github.com/VividCortex/ewma v1.1.1/go.mod h1:2Tkkvm3sRDVXaiyucHiACn4cqf7DpdyLvmx
github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d h1:licZJFw2RwpHMqeKTCYkitsPqHNxTmd4SNR5r94FGM8=
github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d/go.mod h1:asat636LX7Bqt5lYEZ27JNDcqxfjdBQuJ/MM4CN/Lzo=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
-golang.org/x/crypto v0.0.0-20200311171314-f7b00557c8c4 h1:QmwruyY+bKbDDL0BaglrbZABEali68eoMFhTZpCjYVA=
-golang.org/x/crypto v0.0.0-20200311171314-f7b00557c8c4/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/crypto v0.0.0-20200423211502-4bdfaf469ed5 h1:Q7tZBpemrlsc2I7IyODzhtallWRSm4Q0d09pL6XbQtU=
+golang.org/x/crypto v0.0.0-20200423211502-4bdfaf469ed5/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527 h1:uYVVQ9WP/Ds2ROhcaGPeIdVq0RIXVLwsHlnvJ+cT1So=
-golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200420163511-1957bb5e6d1f h1:gWF768j/LaZugp8dyS4UwsslYCYz9XgFxvlgsn0n9H8=
+golang.org/x/sys v0.0.0-20200420163511-1957bb5e6d1f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
diff --git a/vendor/github.com/vbauerster/mpb/v5/internal/percentage.go b/vendor/github.com/vbauerster/mpb/v5/internal/percentage.go
index 7e261cb22..e321e0a6b 100644
--- a/vendor/github.com/vbauerster/mpb/v5/internal/percentage.go
+++ b/vendor/github.com/vbauerster/mpb/v5/internal/percentage.go
@@ -7,6 +7,9 @@ func Percentage(total, current int64, width int) float64 {
if total <= 0 {
return 0
}
+ if current >= total {
+ return float64(width)
+ }
return float64(int64(width)*current) / float64(total)
}
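This clamp matters now that completion can be deferred with SetTotal(total, false): current may legitimately overshoot total, and without the early return the filled width would exceed the bar width. A tiny sketch reproducing the patched helper with a concrete overshoot:

```go
package main

import "fmt"

// percentage mirrors the patched helper above: once current overshoots total,
// the filled width is clamped to the full bar width instead of overflowing it.
func percentage(total, current int64, width int) float64 {
	if total <= 0 {
		return 0
	}
	if current >= total {
		return float64(width)
	}
	return float64(int64(width)*current) / float64(total)
}

func main() {
	fmt.Println(percentage(100, 150, 80)) // 80, not 120
}
```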