author    dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>  2021-08-26 13:21:26 +0000
committer GitHub <noreply@github.com>                                          2021-08-26 13:21:26 +0000
commit    f5ce02b227f43feb7a02b09890facf198448621e (patch)
tree      f7ff84b1f21b6b7a31b166ce8ad77caf0e456b03 /vendor/github.com
parent    6f1faf36da9482c1cc99814772d16125fe9a4e36 (diff)
download  podman-f5ce02b227f43feb7a02b09890facf198448621e.tar.gz
          podman-f5ce02b227f43feb7a02b09890facf198448621e.tar.bz2
          podman-f5ce02b227f43feb7a02b09890facf198448621e.zip
Bump github.com/containers/image/v5 from 5.15.2 to 5.16.0
Bumps [github.com/containers/image/v5](https://github.com/containers/image) from 5.15.2 to 5.16.0.
- [Release notes](https://github.com/containers/image/releases)
- [Commits](https://github.com/containers/image/compare/v5.15.2...v5.16.0)

---
updated-dependencies:
- dependency-name: github.com/containers/image/v5
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
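The corresponding change in podman's own go.mod is a one-line semver-minor bump (the go.mod/go.sum hunks fall outside this vendor/-limited diffstat, so this require block is shown for orientation only):

require (
	github.com/containers/image/v5 v5.16.0 // was v5.15.2
)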
Diffstat (limited to 'vendor/github.com')
-rw-r--r--  vendor/github.com/containers/image/v5/copy/copy.go | 320
-rw-r--r--  vendor/github.com/containers/image/v5/copy/digesting_reader.go | 62
-rw-r--r--  vendor/github.com/containers/image/v5/directory/directory_dest.go | 17
-rw-r--r--  vendor/github.com/containers/image/v5/docker/docker_image_dest.go | 24
-rw-r--r--  vendor/github.com/containers/image/v5/docker/docker_image_src.go | 160
-rw-r--r--  vendor/github.com/containers/image/v5/docker/internal/tarfile/dest.go | 14
-rw-r--r--  vendor/github.com/containers/image/v5/internal/pkg/keyctl/key.go | 1
-rw-r--r--  vendor/github.com/containers/image/v5/internal/pkg/keyctl/keyring.go | 1
-rw-r--r--  vendor/github.com/containers/image/v5/internal/pkg/keyctl/perm.go | 1
-rw-r--r--  vendor/github.com/containers/image/v5/internal/pkg/keyctl/sys_linux.go | 1
-rw-r--r--  vendor/github.com/containers/image/v5/internal/putblobdigest/put_blob_digest.go | 57
-rw-r--r--  vendor/github.com/containers/image/v5/internal/types/types.go | 1
-rw-r--r--  vendor/github.com/containers/image/v5/oci/archive/oci_dest.go | 2
-rw-r--r--  vendor/github.com/containers/image/v5/oci/layout/oci_dest.go | 17
-rw-r--r--  vendor/github.com/containers/image/v5/openshift/openshift-copies.go | 2
-rw-r--r--  vendor/github.com/containers/image/v5/openshift/openshift.go | 2
-rw-r--r--  vendor/github.com/containers/image/v5/ostree/ostree_dest.go | 20
-rw-r--r--  vendor/github.com/containers/image/v5/ostree/ostree_src.go | 1
-rw-r--r--  vendor/github.com/containers/image/v5/ostree/ostree_transport.go | 1
-rw-r--r--  vendor/github.com/containers/image/v5/pkg/docker/config/config.go | 18
-rw-r--r--  vendor/github.com/containers/image/v5/pkg/docker/config/config_linux.go | 2
-rw-r--r--  vendor/github.com/containers/image/v5/pkg/docker/config/config_unsupported.go | 1
-rw-r--r--  vendor/github.com/containers/image/v5/signature/mechanism_gpgme.go | 1
-rw-r--r--  vendor/github.com/containers/image/v5/signature/mechanism_openpgp.go | 1
-rw-r--r--  vendor/github.com/containers/image/v5/storage/storage_image.go | 72
-rw-r--r--  vendor/github.com/containers/image/v5/storage/storage_reference.go | 1
-rw-r--r--  vendor/github.com/containers/image/v5/storage/storage_transport.go | 1
-rw-r--r--  vendor/github.com/containers/image/v5/transports/alltransports/docker_daemon.go | 1
-rw-r--r--  vendor/github.com/containers/image/v5/transports/alltransports/docker_daemon_stub.go | 1
-rw-r--r--  vendor/github.com/containers/image/v5/transports/alltransports/ostree.go | 1
-rw-r--r--  vendor/github.com/containers/image/v5/transports/alltransports/ostree_stub.go | 1
-rw-r--r--  vendor/github.com/containers/image/v5/transports/alltransports/storage.go | 1
-rw-r--r--  vendor/github.com/containers/image/v5/transports/alltransports/storage_stub.go | 1
-rw-r--r--  vendor/github.com/containers/image/v5/types/types.go | 2
-rw-r--r--  vendor/github.com/containers/image/v5/version/version.go | 4
-rw-r--r--  vendor/github.com/containers/storage/VERSION | 2
-rw-r--r--  vendor/github.com/containers/storage/go.mod | 6
-rw-r--r--  vendor/github.com/containers/storage/go.sum | 12
-rw-r--r--  vendor/github.com/containers/storage/layers.go | 59
-rw-r--r--  vendor/github.com/containers/storage/pkg/chunked/storage_linux.go | 207
-rw-r--r--  vendor/github.com/containers/storage/pkg/ioutils/readers.go | 17
-rw-r--r--  vendor/github.com/containers/storage/store.go | 30
-rw-r--r--  vendor/github.com/opencontainers/image-spec/specs-go/v1/annotations.go | 6
-rw-r--r--  vendor/github.com/opencontainers/image-spec/specs-go/v1/config.go | 11
-rw-r--r--  vendor/github.com/vbatts/tar-split/tar/storage/getter.go | 3
-rw-r--r--  vendor/github.com/vbauerster/mpb/v7/README.md | 2
-rw-r--r--  vendor/github.com/vbauerster/mpb/v7/bar.go | 116
-rw-r--r--  vendor/github.com/vbauerster/mpb/v7/bar_filler_bar.go | 150
-rw-r--r--  vendor/github.com/vbauerster/mpb/v7/container_option.go | 6
-rw-r--r--  vendor/github.com/vbauerster/mpb/v7/cwriter/writer.go | 10
-rw-r--r--  vendor/github.com/vbauerster/mpb/v7/cwriter/writer_windows.go | 4
-rw-r--r--  vendor/github.com/vbauerster/mpb/v7/go.mod | 2
-rw-r--r--  vendor/github.com/vbauerster/mpb/v7/go.sum | 4
-rw-r--r--  vendor/github.com/vbauerster/mpb/v7/internal/percentage.go | 4
-rw-r--r--  vendor/github.com/vbauerster/mpb/v7/progress.go | 46
55 files changed, 962 insertions(+), 548 deletions(-)
diff --git a/vendor/github.com/containers/image/v5/copy/copy.go b/vendor/github.com/containers/image/v5/copy/copy.go
index b4ff8aa10..e1649ba8e 100644
--- a/vendor/github.com/containers/image/v5/copy/copy.go
+++ b/vendor/github.com/containers/image/v5/copy/copy.go
@@ -36,14 +36,6 @@ import (
"golang.org/x/term"
)
-type digestingReader struct {
- source io.Reader
- digester digest.Digester
- expectedDigest digest.Digest
- validationFailed bool
- validationSucceeded bool
-}
-
var (
// ErrDecryptParamsMissing is returned if there is missing decryption parameters
ErrDecryptParamsMissing = errors.New("Necessary DecryptParameters not present")
@@ -51,6 +43,10 @@ var (
// maxParallelDownloads is used to limit the maximum number of parallel
// downloads. Let's follow Firefox by limiting it to 6.
maxParallelDownloads = uint(6)
+
+ // defaultCompressionFormat is used if the destination transport requests
+ // compression, and the user does not explicitly instruct us to use an algorithm.
+ defaultCompressionFormat = &compression.Gzip
)
// compressionBufferSize is the buffer size used to compress a blob
@@ -64,66 +60,22 @@ var expectedCompressionFormats = map[string]*compressiontypes.Algorithm{
manifest.DockerV2Schema2LayerMediaType: &compression.Gzip,
}
-// newDigestingReader returns an io.Reader implementation with contents of source, which will eventually return a non-EOF error
-// or set validationSucceeded/validationFailed to true if the source stream does/does not match expectedDigest.
-// (neither is set if EOF is never reached).
-func newDigestingReader(source io.Reader, expectedDigest digest.Digest) (*digestingReader, error) {
- var digester digest.Digester
- if err := expectedDigest.Validate(); err != nil {
- return nil, errors.Errorf("Invalid digest specification %s", expectedDigest)
- }
- digestAlgorithm := expectedDigest.Algorithm()
- if !digestAlgorithm.Available() {
- return nil, errors.Errorf("Invalid digest specification %s: unsupported digest algorithm %s", expectedDigest, digestAlgorithm)
- }
- digester = digestAlgorithm.Digester()
-
- return &digestingReader{
- source: source,
- digester: digester,
- expectedDigest: expectedDigest,
- validationFailed: false,
- }, nil
-}
-
-func (d *digestingReader) Read(p []byte) (int, error) {
- n, err := d.source.Read(p)
- if n > 0 {
- if n2, err := d.digester.Hash().Write(p[:n]); n2 != n || err != nil {
- // Coverage: This should not happen, the hash.Hash interface requires
- // d.digest.Write to never return an error, and the io.Writer interface
- // requires n2 == len(input) if no error is returned.
- return 0, errors.Wrapf(err, "updating digest during verification: %d vs. %d", n2, n)
- }
- }
- if err == io.EOF {
- actualDigest := d.digester.Digest()
- if actualDigest != d.expectedDigest {
- d.validationFailed = true
- return 0, errors.Errorf("Digest did not match, expected %s, got %s", d.expectedDigest, actualDigest)
- }
- d.validationSucceeded = true
- }
- return n, err
-}
-
// copier allows us to keep track of diffID values for blobs, and other
// data shared across one or more images in a possible manifest list.
type copier struct {
- dest types.ImageDestination
- rawSource types.ImageSource
- reportWriter io.Writer
- progressOutput io.Writer
- progressInterval time.Duration
- progress chan types.ProgressProperties
- blobInfoCache internalblobinfocache.BlobInfoCache2
- copyInParallel bool
- compressionFormat compressiontypes.Algorithm
- compressionLevel *int
- ociDecryptConfig *encconfig.DecryptConfig
- ociEncryptConfig *encconfig.EncryptConfig
- maxParallelDownloads uint
- downloadForeignLayers bool
+ dest types.ImageDestination
+ rawSource types.ImageSource
+ reportWriter io.Writer
+ progressOutput io.Writer
+ progressInterval time.Duration
+ progress chan types.ProgressProperties
+ blobInfoCache internalblobinfocache.BlobInfoCache2
+ compressionFormat *compressiontypes.Algorithm // Compression algorithm to use, if the user explicitly requested one, or nil.
+ compressionLevel *int
+ ociDecryptConfig *encconfig.DecryptConfig
+ ociEncryptConfig *encconfig.EncryptConfig
+ concurrentBlobCopiesSemaphore *semaphore.Weighted // Limits the amount of concurrently copied blobs
+ downloadForeignLayers bool
}
// imageCopier tracks state specific to a single image (possibly an item of a manifest list)
@@ -196,7 +148,10 @@ type Options struct {
// encrypted if non-nil. If nil, it does not attempt to decrypt an image.
OciDecryptConfig *encconfig.DecryptConfig
- // MaxParallelDownloads indicates the maximum layers to pull at the same time. A reasonable default is used if this is left as 0.
+ // A weighted semaphore to limit the amount of concurrently copied layers and configs. Applies to all copy operations using the semaphore. If set, MaxParallelDownloads is ignored.
+ ConcurrentBlobCopiesSemaphore *semaphore.Weighted
+
+ // MaxParallelDownloads indicates the maximum layers to pull at the same time. Applies to a single copy operation. A reasonable default is used if this is left as 0. Ignored if ConcurrentBlobCopiesSemaphore is set.
MaxParallelDownloads uint
// When OptimizeDestinationImageAlreadyExists is set, optimize the copy assuming that the destination image already
@@ -269,7 +224,6 @@ func Image(ctx context.Context, policyContext *signature.PolicyContext, destRef,
if !isTTY(reportWriter) {
progressOutput = ioutil.Discard
}
- copyInParallel := dest.HasThreadSafePutBlob() && rawSource.HasThreadSafeGetBlob()
c := &copier{
dest: dest,
@@ -278,24 +232,38 @@ func Image(ctx context.Context, policyContext *signature.PolicyContext, destRef,
progressOutput: progressOutput,
progressInterval: options.ProgressInterval,
progress: options.Progress,
- copyInParallel: copyInParallel,
// FIXME? The cache is used for sources and destinations equally, but we only have a SourceCtx and DestinationCtx.
// For now, use DestinationCtx (because blob reuse changes the behavior of the destination side more); eventually
// we might want to add a separate CommonCtx — or would that be too confusing?
blobInfoCache: internalblobinfocache.FromBlobInfoCache(blobinfocache.DefaultCache(options.DestinationCtx)),
ociDecryptConfig: options.OciDecryptConfig,
ociEncryptConfig: options.OciEncryptConfig,
- maxParallelDownloads: options.MaxParallelDownloads,
downloadForeignLayers: options.DownloadForeignLayers,
}
- // Default to using gzip compression unless specified otherwise.
- if options.DestinationCtx == nil || options.DestinationCtx.CompressionFormat == nil {
- c.compressionFormat = compression.Gzip
+
+ // Set the concurrentBlobCopiesSemaphore if we can copy layers in parallel.
+ if dest.HasThreadSafePutBlob() && rawSource.HasThreadSafeGetBlob() {
+ c.concurrentBlobCopiesSemaphore = options.ConcurrentBlobCopiesSemaphore
+ if c.concurrentBlobCopiesSemaphore == nil {
+ max := options.MaxParallelDownloads
+ if max == 0 {
+ max = maxParallelDownloads
+ }
+ c.concurrentBlobCopiesSemaphore = semaphore.NewWeighted(int64(max))
+ }
} else {
- c.compressionFormat = *options.DestinationCtx.CompressionFormat
+ c.concurrentBlobCopiesSemaphore = semaphore.NewWeighted(int64(1))
+ if options.ConcurrentBlobCopiesSemaphore != nil {
+ if err := options.ConcurrentBlobCopiesSemaphore.Acquire(ctx, 1); err != nil {
+ return nil, fmt.Errorf("acquiring semaphore for concurrent blob copies: %w", err)
+ }
+ defer options.ConcurrentBlobCopiesSemaphore.Release(1)
+ }
}
+
if options.DestinationCtx != nil {
- // Note that the compressionLevel can be nil.
+ // Note that compressionFormat and compressionLevel can be nil.
+ c.compressionFormat = options.DestinationCtx.CompressionFormat
c.compressionLevel = options.DestinationCtx.CompressionLevel
}
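The copier hunks above replace the per-copy copySemaphore with a concurrentBlobCopiesSemaphore that callers may share across copy.Image invocations to bound the total number of in-flight blob copies. A minimal, self-contained sketch of the underlying golang.org/x/sync/semaphore pattern (the worker count and payloads are illustrative):

package main

import (
	"context"
	"fmt"
	"sync"

	"golang.org/x/sync/semaphore"
)

func main() {
	ctx := context.Background()
	sem := semaphore.NewWeighted(6) // mirrors maxParallelDownloads
	var wg sync.WaitGroup
	for i := 0; i < 20; i++ {
		if err := sem.Acquire(ctx, 1); err != nil {
			// Acquire only fails with ctx.Err(), as the diff's comment notes.
			fmt.Println("copying layer:", err)
			break
		}
		wg.Add(1)
		go func(n int) {
			defer sem.Release(1)
			defer wg.Done()
			fmt.Println("copying blob", n) // stand-in for one blob copy
		}(i)
	}
	wg.Wait()
}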
@@ -904,22 +872,9 @@ func (ic *imageCopier) copyLayers(ctx context.Context) error {
// copyGroup is used to determine if all layers are copied
copyGroup := sync.WaitGroup{}
- // copySemaphore is used to limit the number of parallel downloads to
- // avoid malicious images causing troubles and to be nice to servers.
- var copySemaphore *semaphore.Weighted
- if ic.c.copyInParallel {
- max := ic.c.maxParallelDownloads
- if max == 0 {
- max = maxParallelDownloads
- }
- copySemaphore = semaphore.NewWeighted(int64(max))
- } else {
- copySemaphore = semaphore.NewWeighted(int64(1))
- }
-
data := make([]copyLayerData, numLayers)
copyLayerHelper := func(index int, srcLayer types.BlobInfo, toEncrypt bool, pool *mpb.Progress, srcRef reference.Named) {
- defer copySemaphore.Release(1)
+ defer ic.c.concurrentBlobCopiesSemaphore.Release(1)
defer copyGroup.Done()
cld := copyLayerData{}
if !ic.c.downloadForeignLayers && ic.c.dest.AcceptsForeignLayerURLs() && len(srcLayer.URLs) != 0 {
@@ -957,17 +912,17 @@ func (ic *imageCopier) copyLayers(ctx context.Context) error {
}
if err := func() error { // A scope for defer
- progressPool, progressCleanup := ic.c.newProgressPool(ctx)
- defer func() {
- // Wait for all layers to be copied. progressCleanup() must not be called while any of the copyLayerHelpers interact with the progressPool.
- copyGroup.Wait()
- progressCleanup()
- }()
+ progressPool := ic.c.newProgressPool()
+ defer progressPool.Wait()
+
+ // Ensure we wait for all layers to be copied. progressPool.Wait() must not be called while any of the copyLayerHelpers interact with the progressPool.
+ defer copyGroup.Wait()
for i, srcLayer := range srcInfos {
- err = copySemaphore.Acquire(ctx, 1)
+ err = ic.c.concurrentBlobCopiesSemaphore.Acquire(ctx, 1)
if err != nil {
- return errors.Wrapf(err, "Can't acquire semaphore")
+ // This can only fail with ctx.Err(), so no need to blame acquiring the semaphore.
+ return fmt.Errorf("copying layer: %w", err)
}
copyGroup.Add(1)
go copyLayerHelper(i, srcLayer, encLayerBitmap[i], progressPool, ic.c.rawSource.Reference().DockerReference())
@@ -1061,15 +1016,13 @@ func (ic *imageCopier) copyUpdatedConfigAndManifest(ctx context.Context, instanc
return man, manifestDigest, nil
}
-// newProgressPool creates a *mpb.Progress and a cleanup function.
-// The caller must eventually call the returned cleanup function after the pool will no longer be updated.
-func (c *copier) newProgressPool(ctx context.Context) (*mpb.Progress, func()) {
- ctx, cancel := context.WithCancel(ctx)
- pool := mpb.NewWithContext(ctx, mpb.WithWidth(40), mpb.WithOutput(c.progressOutput))
- return pool, func() {
- cancel()
- pool.Wait()
- }
+// newProgressPool creates a *mpb.Progress.
+// The caller must eventually call pool.Wait() after the pool will no longer be updated.
+// NOTE: Every progress bar created within the progress pool must either successfully
+// complete or be aborted, or pool.Wait() will hang. That is typically done
+// using "defer bar.Abort(false)", which must be called BEFORE pool.Wait() is called.
+func (c *copier) newProgressPool() *mpb.Progress {
+ return mpb.New(mpb.WithWidth(40), mpb.WithOutput(c.progressOutput))
}
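newProgressPool no longer returns a cleanup closure: with mpb v7, pool.Wait() returns only after every bar has completed or been aborted, so each bar must be paired with a deferred Abort, exactly as the NOTE above warns. A small sketch of that lifecycle (the bar total and loop body are illustrative):

package main

import (
	"time"

	"github.com/vbauerster/mpb/v7"
)

func main() {
	pool := mpb.New(mpb.WithWidth(40)) // as in the new newProgressPool
	defer pool.Wait()                  // hangs unless every bar completes or aborts

	bar := pool.AddBar(100)
	defer bar.Abort(false) // runs before pool.Wait(); a no-op once the bar completed

	for i := 0; i < 100; i++ {
		bar.Increment()
		time.Sleep(5 * time.Millisecond)
	}
}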
// customPartialBlobCounter provides a decorator function for the partial blobs retrieval progress bar
@@ -1090,6 +1043,9 @@ func customPartialBlobCounter(filler interface{}, wcc ...decor.WC) decor.Decorat
// createProgressBar creates a mpb.Bar in pool. Note that if the copier's reportWriter
// is ioutil.Discard, the progress bar's output will be discarded
+// NOTE: Every progress bar created within a progress pool must either successfully
+// complete or be aborted, or pool.Wait() will hang. That is typically done
+// using "defer bar.Abort(false)", which must happen BEFORE pool.Wait() is called.
func (c *copier) createProgressBar(pool *mpb.Progress, partial bool, info types.BlobInfo, kind string, onComplete string) *mpb.Bar {
// shortDigestLen is the length of the digest used for blobs.
const shortDigestLen = 12
@@ -1149,15 +1105,23 @@ func (c *copier) createProgressBar(pool *mpb.Progress, partial bool, info types.
func (c *copier) copyConfig(ctx context.Context, src types.Image) error {
srcInfo := src.ConfigInfo()
if srcInfo.Digest != "" {
+ if err := c.concurrentBlobCopiesSemaphore.Acquire(ctx, 1); err != nil {
+ // This can only fail with ctx.Err(), so no need to blame acquiring the semaphore.
+ return fmt.Errorf("copying config: %w", err)
+ }
+ defer c.concurrentBlobCopiesSemaphore.Release(1)
+
configBlob, err := src.ConfigBlob(ctx)
if err != nil {
return errors.Wrapf(err, "reading config blob %s", srcInfo.Digest)
}
destInfo, err := func() (types.BlobInfo, error) { // A scope for defer
- progressPool, progressCleanup := c.newProgressPool(ctx)
- defer progressCleanup()
+ progressPool := c.newProgressPool()
+ defer progressPool.Wait()
bar := c.createProgressBar(progressPool, false, srcInfo, "config", "done")
+ defer bar.Abort(false)
+
destInfo, err := c.copyBlobFromStream(ctx, bytes.NewReader(configBlob), srcInfo, nil, false, true, false, bar, -1, false)
if err != nil {
return types.BlobInfo{}, err
@@ -1184,7 +1148,7 @@ type diffIDResult struct {
// copyLayer copies a layer with srcInfo (with known Digest and Annotations and possibly known Size) in src to dest, perhaps (de/re/)compressing it,
// and returns a complete blobInfo of the copied layer, and a value for LayerDiffIDs if diffIDIsNeeded
-// srcRef can be used as an additional hint to the destination during checking whehter a layer can be reused but srcRef can be nil.
+// srcRef can be used as an additional hint to the destination during checking whether a layer can be reused but srcRef can be nil.
func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, toEncrypt bool, pool *mpb.Progress, layerIndex int, srcRef reference.Named, emptyLayer bool) (types.BlobInfo, digest.Digest, error) {
// If the srcInfo doesn't contain compression information, try to compute it from the
// MediaType, which was either read from a manifest by way of LayerInfos() or constructed
@@ -1245,8 +1209,11 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, to
}
if reused {
logrus.Debugf("Skipping blob %s (already present):", srcInfo.Digest)
- bar := ic.c.createProgressBar(pool, false, srcInfo, "blob", "skipped: already exists")
- bar.SetTotal(0, true)
+ func() { // A scope for defer
+ bar := ic.c.createProgressBar(pool, false, srcInfo, "blob", "skipped: already exists")
+ defer bar.Abort(false)
+ bar.SetTotal(0, true)
+ }()
// Throw an event that the layer has been skipped
if ic.c.progress != nil && ic.c.progressInterval > 0 {
@@ -1279,40 +1246,49 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, to
imgSource, okSource := ic.c.rawSource.(internalTypes.ImageSourceSeekable)
imgDest, okDest := ic.c.dest.(internalTypes.ImageDestinationPartial)
if okSource && okDest && !diffIDIsNeeded {
- bar := ic.c.createProgressBar(pool, true, srcInfo, "blob", "done")
-
- progress := make(chan int64)
- terminate := make(chan interface{})
-
- defer close(terminate)
- defer close(progress)
-
- proxy := imageSourceSeekableProxy{
- source: imgSource,
- progress: progress,
- }
- go func() {
- for {
- select {
- case written := <-progress:
- bar.IncrInt64(written)
- case <-terminate:
- return
+ if reused, blobInfo := func() (bool, types.BlobInfo) { // A scope for defer
+ bar := ic.c.createProgressBar(pool, true, srcInfo, "blob", "done")
+ hideProgressBar := true
+ defer func() { // Note that this is not the same as defer bar.Abort(hideProgressBar); we need hideProgressBar to be evaluated lazily.
+ bar.Abort(hideProgressBar)
+ }()
+
+ progress := make(chan int64)
+ terminate := make(chan interface{})
+
+ defer close(terminate)
+ defer close(progress)
+
+ proxy := imageSourceSeekableProxy{
+ source: imgSource,
+ progress: progress,
+ }
+ go func() {
+ for {
+ select {
+ case written := <-progress:
+ bar.IncrInt64(written)
+ case <-terminate:
+ return
+ }
}
+ }()
+
+ bar.SetTotal(srcInfo.Size, false)
+ info, err := imgDest.PutBlobPartial(ctx, proxy, srcInfo, ic.c.blobInfoCache)
+ if err == nil {
+ bar.SetRefill(srcInfo.Size - bar.Current())
+ bar.SetCurrent(srcInfo.Size)
+ bar.SetTotal(srcInfo.Size, true)
+ hideProgressBar = false
+ logrus.Debugf("Retrieved partial blob %v", srcInfo.Digest)
+ return true, info
}
- }()
-
- bar.SetTotal(srcInfo.Size, false)
- info, err := imgDest.PutBlobPartial(ctx, proxy, srcInfo, ic.c.blobInfoCache)
- if err == nil {
- bar.SetRefill(srcInfo.Size - bar.Current())
- bar.SetCurrent(srcInfo.Size)
- bar.SetTotal(srcInfo.Size, true)
- logrus.Debugf("Retrieved partial blob %v", srcInfo.Digest)
- return info, cachedDiffID, nil
+ logrus.Debugf("Failed to retrieve partial blob: %v", err)
+ return false, types.BlobInfo{}
+ }(); reused {
+ return blobInfo, cachedDiffID, nil
}
- bar.Abort(true)
- logrus.Debugf("Failed to retrieve partial blob: %v", err)
}
// Fallback: copy the layer, computing the diffID if we need to do so
@@ -1322,32 +1298,35 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, to
}
defer srcStream.Close()
- bar := ic.c.createProgressBar(pool, false, srcInfo, "blob", "done")
+ return func() (types.BlobInfo, digest.Digest, error) { // A scope for defer
+ bar := ic.c.createProgressBar(pool, false, srcInfo, "blob", "done")
+ defer bar.Abort(false)
- blobInfo, diffIDChan, err := ic.copyLayerFromStream(ctx, srcStream, types.BlobInfo{Digest: srcInfo.Digest, Size: srcBlobSize, MediaType: srcInfo.MediaType, Annotations: srcInfo.Annotations}, diffIDIsNeeded, toEncrypt, bar, layerIndex, emptyLayer)
- if err != nil {
- return types.BlobInfo{}, "", err
- }
+ blobInfo, diffIDChan, err := ic.copyLayerFromStream(ctx, srcStream, types.BlobInfo{Digest: srcInfo.Digest, Size: srcBlobSize, MediaType: srcInfo.MediaType, Annotations: srcInfo.Annotations}, diffIDIsNeeded, toEncrypt, bar, layerIndex, emptyLayer)
+ if err != nil {
+ return types.BlobInfo{}, "", err
+ }
- diffID := cachedDiffID
- if diffIDIsNeeded {
- select {
- case <-ctx.Done():
- return types.BlobInfo{}, "", ctx.Err()
- case diffIDResult := <-diffIDChan:
- if diffIDResult.err != nil {
- return types.BlobInfo{}, "", errors.Wrap(diffIDResult.err, "computing layer DiffID")
+ diffID := cachedDiffID
+ if diffIDIsNeeded {
+ select {
+ case <-ctx.Done():
+ return types.BlobInfo{}, "", ctx.Err()
+ case diffIDResult := <-diffIDChan:
+ if diffIDResult.err != nil {
+ return types.BlobInfo{}, "", errors.Wrap(diffIDResult.err, "computing layer DiffID")
+ }
+ logrus.Debugf("Computed DiffID %s for layer %s", diffIDResult.digest, srcInfo.Digest)
+ // This is safe because we have just computed diffIDResult.Digest ourselves, and in the process
+ // we have read all of the input blob, so srcInfo.Digest must have been validated by digestingReader.
+ ic.c.blobInfoCache.RecordDigestUncompressedPair(srcInfo.Digest, diffIDResult.digest)
+ diffID = diffIDResult.digest
}
- logrus.Debugf("Computed DiffID %s for layer %s", diffIDResult.digest, srcInfo.Digest)
- // This is safe because we have just computed diffIDResult.Digest ourselves, and in the process
- // we have read all of the input blob, so srcInfo.Digest must have been validated by digestingReader.
- ic.c.blobInfoCache.RecordDigestUncompressedPair(srcInfo.Digest, diffIDResult.digest)
- diffID = diffIDResult.digest
}
- }
- bar.SetTotal(srcInfo.Size, true)
- return blobInfo, diffID, nil
+ bar.SetTotal(srcInfo.Size, true)
+ return blobInfo, diffID, nil
+ }()
}
// copyLayerFromStream is an implementation detail of copyLayer; mostly providing a separate “defer” scope.
@@ -1502,7 +1481,7 @@ func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, sr
// short-circuit conditions
var inputInfo types.BlobInfo
var compressionOperation types.LayerCompression
- uploadCompressionFormat := &c.compressionFormat
+ var uploadCompressionFormat *compressiontypes.Algorithm
srcCompressorName := internalblobinfocache.Uncompressed
if isCompressed {
srcCompressorName = compressionFormat.Name()
@@ -1514,14 +1493,19 @@ func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, sr
compressionOperation = types.PreserveOriginal
inputInfo = srcInfo
srcCompressorName = internalblobinfocache.UnknownCompression
- uploadCompressorName = internalblobinfocache.UnknownCompression
uploadCompressionFormat = nil
+ uploadCompressorName = internalblobinfocache.UnknownCompression
} else if canModifyBlob && c.dest.DesiredLayerCompression() == types.Compress && !isCompressed {
logrus.Debugf("Compressing blob on the fly")
compressionOperation = types.Compress
pipeReader, pipeWriter := io.Pipe()
defer pipeReader.Close()
+ if c.compressionFormat != nil {
+ uploadCompressionFormat = c.compressionFormat
+ } else {
+ uploadCompressionFormat = defaultCompressionFormat
+ }
// If this fails while writing data, it will do pipeWriter.CloseWithError(); if it fails otherwise,
// e.g. because we have exited and due to pipeReader.Close() above further writing to the pipe has failed,
// we don’t care.
@@ -1530,7 +1514,8 @@ func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, sr
inputInfo.Digest = ""
inputInfo.Size = -1
uploadCompressorName = uploadCompressionFormat.Name()
- } else if canModifyBlob && c.dest.DesiredLayerCompression() == types.Compress && isCompressed && uploadCompressionFormat.Name() != compressionFormat.Name() {
+ } else if canModifyBlob && c.dest.DesiredLayerCompression() == types.Compress && isCompressed &&
+ c.compressionFormat != nil && c.compressionFormat.Name() != compressionFormat.Name() {
// When the blob is compressed, but the desired format is different, it first needs to be decompressed and finally
// re-compressed using the desired format.
logrus.Debugf("Blob will be converted")
@@ -1545,6 +1530,7 @@ func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, sr
pipeReader, pipeWriter := io.Pipe()
defer pipeReader.Close()
+ uploadCompressionFormat = c.compressionFormat
go c.compressGoroutine(pipeWriter, s, compressionMetadata, *uploadCompressionFormat) // Closes pipeWriter
destStream = pipeReader
@@ -1562,14 +1548,13 @@ func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, sr
destStream = s
inputInfo.Digest = ""
inputInfo.Size = -1
- uploadCompressorName = internalblobinfocache.Uncompressed
uploadCompressionFormat = nil
+ uploadCompressorName = internalblobinfocache.Uncompressed
} else {
// PreserveOriginal might also need to recompress the original blob if the desired compression format is different.
logrus.Debugf("Using original blob without modification")
compressionOperation = types.PreserveOriginal
inputInfo = srcInfo
- uploadCompressorName = srcCompressorName
// Remember if the original blob was compressed, and if so how, so that if
// LayerInfosForCopy() returned something that differs from what was in the
// source's manifest, and UpdatedImage() needs to call UpdateLayerInfos(),
@@ -1579,6 +1564,7 @@ func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, sr
} else {
uploadCompressionFormat = nil
}
+ uploadCompressorName = srcCompressorName
}
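Taken together, these hunks make compressionFormat a nilable pointer and fall back to defaultCompressionFormat (&compression.Gzip) only when the destination actually requests compression. The fallback amounts to streaming gzip; a standard-library stand-in for what compressGoroutine produces through the pipe (payload illustrative):

package main

import (
	"bytes"
	"compress/gzip"
	"fmt"
	"io/ioutil"
)

func main() {
	// Compress on the fly, as the types.Compress branch does via an io.Pipe
	// feeding the upload.
	var buf bytes.Buffer
	w := gzip.NewWriter(&buf)
	if _, err := w.Write([]byte("layer bytes")); err != nil {
		panic(err)
	}
	w.Close() // flushes the gzip footer

	r, err := gzip.NewReader(&buf)
	if err != nil {
		panic(err)
	}
	defer r.Close()
	data, _ := ioutil.ReadAll(r)
	fmt.Println(string(data)) // layer bytes
}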
// === Encrypt the stream for valid mediatypes if ociEncryptConfig provided
diff --git a/vendor/github.com/containers/image/v5/copy/digesting_reader.go b/vendor/github.com/containers/image/v5/copy/digesting_reader.go
new file mode 100644
index 000000000..ccc9110ff
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/copy/digesting_reader.go
@@ -0,0 +1,62 @@
+package copy
+
+import (
+ "hash"
+ "io"
+
+ digest "github.com/opencontainers/go-digest"
+ "github.com/pkg/errors"
+)
+
+type digestingReader struct {
+ source io.Reader
+ digester digest.Digester
+ hash hash.Hash
+ expectedDigest digest.Digest
+ validationFailed bool
+ validationSucceeded bool
+}
+
+// newDigestingReader returns an io.Reader implementation with contents of source, which will eventually return a non-EOF error
+// or set validationSucceeded/validationFailed to true if the source stream does/does not match expectedDigest.
+// (neither is set if EOF is never reached).
+func newDigestingReader(source io.Reader, expectedDigest digest.Digest) (*digestingReader, error) {
+ var digester digest.Digester
+ if err := expectedDigest.Validate(); err != nil {
+ return nil, errors.Errorf("Invalid digest specification %s", expectedDigest)
+ }
+ digestAlgorithm := expectedDigest.Algorithm()
+ if !digestAlgorithm.Available() {
+ return nil, errors.Errorf("Invalid digest specification %s: unsupported digest algorithm %s", expectedDigest, digestAlgorithm)
+ }
+ digester = digestAlgorithm.Digester()
+
+ return &digestingReader{
+ source: source,
+ digester: digester,
+ hash: digester.Hash(),
+ expectedDigest: expectedDigest,
+ validationFailed: false,
+ }, nil
+}
+
+func (d *digestingReader) Read(p []byte) (int, error) {
+ n, err := d.source.Read(p)
+ if n > 0 {
+ if n2, err := d.hash.Write(p[:n]); n2 != n || err != nil {
+ // Coverage: This should not happen, the hash.Hash interface requires
+ // d.digest.Write to never return an error, and the io.Writer interface
+ // requires n2 == len(input) if no error is returned.
+ return 0, errors.Wrapf(err, "updating digest during verification: %d vs. %d", n2, n)
+ }
+ }
+ if err == io.EOF {
+ actualDigest := d.digester.Digest()
+ if actualDigest != d.expectedDigest {
+ d.validationFailed = true
+ return 0, errors.Errorf("Digest did not match, expected %s, got %s", d.expectedDigest, actualDigest)
+ }
+ d.validationSucceeded = true
+ }
+ return n, err
+}
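digestingReader moves verbatim into its own file, with one tweak: the hash.Hash is captured once at construction rather than re-fetched via digester.Hash() on every Read. Its verification logic can be summarized with go-digest directly; a sketch (verifyStream is a hypothetical helper, not part of the package):

package main

import (
	"fmt"
	"io"
	"strings"

	digest "github.com/opencontainers/go-digest"
)

// verifyStream drains r while hashing it, then compares against expected --
// the same checks digestingReader performs incrementally in Read.
func verifyStream(r io.Reader, expected digest.Digest) error {
	if err := expected.Validate(); err != nil {
		return fmt.Errorf("invalid digest %q: %w", expected, err)
	}
	digester := expected.Algorithm().Digester()
	h := digester.Hash() // captured once, like the new struct field
	if _, err := io.Copy(h, r); err != nil {
		return err
	}
	if actual := digester.Digest(); actual != expected {
		return fmt.Errorf("digest mismatch: expected %s, got %s", expected, actual)
	}
	return nil
}

func main() {
	blob := "layer bytes"
	fmt.Println(verifyStream(strings.NewReader(blob), digest.FromString(blob))) // <nil>
}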
diff --git a/vendor/github.com/containers/image/v5/directory/directory_dest.go b/vendor/github.com/containers/image/v5/directory/directory_dest.go
index e3280aa2b..ea20e7c5e 100644
--- a/vendor/github.com/containers/image/v5/directory/directory_dest.go
+++ b/vendor/github.com/containers/image/v5/directory/directory_dest.go
@@ -8,6 +8,7 @@ import (
"path/filepath"
"runtime"
+ "github.com/containers/image/v5/internal/putblobdigest"
"github.com/containers/image/v5/types"
"github.com/opencontainers/go-digest"
"github.com/pkg/errors"
@@ -141,7 +142,7 @@ func (d *dirImageDestination) HasThreadSafePutBlob() bool {
}
// PutBlob writes contents of stream and returns data representing the result (with all data filled in).
-// inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it.
+// inputInfo.Digest can be optionally provided if known; if provided, and stream is read to the end without error, the digest MUST match the stream contents.
// inputInfo.Size is the expected length of stream, if known.
// May update cache.
// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
@@ -163,17 +164,15 @@ func (d *dirImageDestination) PutBlob(ctx context.Context, stream io.Reader, inp
}
}()
- digester := digest.Canonical.Digester()
- tee := io.TeeReader(stream, digester.Hash())
-
+ digester, stream := putblobdigest.DigestIfCanonicalUnknown(stream, inputInfo)
// TODO: This can take quite some time, and should ideally be cancellable using ctx.Done().
- size, err := io.Copy(blobFile, tee)
+ size, err := io.Copy(blobFile, stream)
if err != nil {
return types.BlobInfo{}, err
}
- computedDigest := digester.Digest()
+ blobDigest := digester.Digest()
if inputInfo.Size != -1 && size != inputInfo.Size {
- return types.BlobInfo{}, errors.Errorf("Size mismatch when copying %s, expected %d, got %d", computedDigest, inputInfo.Size, size)
+ return types.BlobInfo{}, errors.Errorf("Size mismatch when copying %s, expected %d, got %d", blobDigest, inputInfo.Size, size)
}
if err := blobFile.Sync(); err != nil {
return types.BlobInfo{}, err
@@ -189,7 +188,7 @@ func (d *dirImageDestination) PutBlob(ctx context.Context, stream io.Reader, inp
}
}
- blobPath := d.ref.layerPath(computedDigest)
+ blobPath := d.ref.layerPath(blobDigest)
// need to explicitly close the file, since a rename won't otherwise not work on Windows
blobFile.Close()
explicitClosed = true
@@ -197,7 +196,7 @@ func (d *dirImageDestination) PutBlob(ctx context.Context, stream io.Reader, inp
return types.BlobInfo{}, err
}
succeeded = true
- return types.BlobInfo{Digest: computedDigest, Size: size}, nil
+ return types.BlobInfo{Digest: blobDigest, Size: size}, nil
}
// TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
diff --git a/vendor/github.com/containers/image/v5/docker/docker_image_dest.go b/vendor/github.com/containers/image/v5/docker/docker_image_dest.go
index 360a7122e..80701a761 100644
--- a/vendor/github.com/containers/image/v5/docker/docker_image_dest.go
+++ b/vendor/github.com/containers/image/v5/docker/docker_image_dest.go
@@ -17,6 +17,7 @@ import (
"github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/internal/blobinfocache"
"github.com/containers/image/v5/internal/iolimits"
+ "github.com/containers/image/v5/internal/putblobdigest"
"github.com/containers/image/v5/internal/uploadreader"
"github.com/containers/image/v5/manifest"
"github.com/containers/image/v5/pkg/blobinfocache/none"
@@ -124,14 +125,14 @@ func (d *dockerImageDestination) HasThreadSafePutBlob() bool {
}
// PutBlob writes contents of stream and returns data representing the result (with all data filled in).
-// inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it.
+// inputInfo.Digest can be optionally provided if known; if provided, and stream is read to the end without error, the digest MUST match the stream contents.
// inputInfo.Size is the expected length of stream, if known.
// May update cache.
// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
// to any other readers for download using the supplied digest.
// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
func (d *dockerImageDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) {
- if inputInfo.Digest.String() != "" {
+ if inputInfo.Digest != "" {
// This should not really be necessary, at least the copy code calls TryReusingBlob automatically.
// Still, we need to check, if only because the "initiate upload" endpoint does not have a documented "blob already exists" return value.
// But we do that with NoCache, so that it _only_ checks the primary destination, instead of trying all mount candidates _again_.
@@ -161,10 +162,12 @@ func (d *dockerImageDestination) PutBlob(ctx context.Context, stream io.Reader,
return types.BlobInfo{}, errors.Wrap(err, "determining upload URL")
}
- digester := digest.Canonical.Digester()
+ digester, stream := putblobdigest.DigestIfCanonicalUnknown(stream, inputInfo)
sizeCounter := &sizeCounter{}
+ stream = io.TeeReader(stream, sizeCounter)
+
uploadLocation, err = func() (*url.URL, error) { // A scope for defer
- uploadReader := uploadreader.NewUploadReader(io.TeeReader(stream, io.MultiWriter(digester.Hash(), sizeCounter)))
+ uploadReader := uploadreader.NewUploadReader(stream)
// This error text should never be user-visible, we terminate only after makeRequestToResolvedURL
// returns, so there isn’t a way for the error text to be provided to any of our callers.
defer uploadReader.Terminate(errors.New("Reading data from an already terminated upload"))
@@ -186,13 +189,12 @@ func (d *dockerImageDestination) PutBlob(ctx context.Context, stream io.Reader,
if err != nil {
return types.BlobInfo{}, err
}
- computedDigest := digester.Digest()
+ blobDigest := digester.Digest()
// FIXME: DELETE uploadLocation on failure (does not really work in docker/distribution servers, which incorrectly require the "delete" action in the token's scope)
locationQuery := uploadLocation.Query()
- // TODO: check inputInfo.Digest == computedDigest https://github.com/containers/image/pull/70#discussion_r77646717
- locationQuery.Set("digest", computedDigest.String())
+ locationQuery.Set("digest", blobDigest.String())
uploadLocation.RawQuery = locationQuery.Encode()
res, err = d.c.makeRequestToResolvedURL(ctx, http.MethodPut, uploadLocation.String(), map[string][]string{"Content-Type": {"application/octet-stream"}}, nil, -1, v2Auth, nil)
if err != nil {
@@ -204,9 +206,9 @@ func (d *dockerImageDestination) PutBlob(ctx context.Context, stream io.Reader,
return types.BlobInfo{}, errors.Wrapf(registryHTTPResponseToError(res), "uploading layer to %s", uploadLocation)
}
- logrus.Debugf("Upload of layer %s complete", computedDigest)
- cache.RecordKnownLocation(d.ref.Transport(), bicTransportScope(d.ref), computedDigest, newBICLocationReference(d.ref))
- return types.BlobInfo{Digest: computedDigest, Size: sizeCounter.size}, nil
+ logrus.Debugf("Upload of layer %s complete", blobDigest)
+ cache.RecordKnownLocation(d.ref.Transport(), bicTransportScope(d.ref), blobDigest, newBICLocationReference(d.ref))
+ return types.BlobInfo{Digest: blobDigest, Size: sizeCounter.size}, nil
}
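The rewritten PutBlob chains two readers: putblobdigest decides whether the digest still needs computing, and a TeeReader into a sizeCounter tallies the uploaded bytes. The counting side is just an io.Writer summing Write lengths; a sketch (sizeCounter's shape is inferred from its use in this hunk):

package main

import (
	"fmt"
	"io"
	"io/ioutil"
	"strings"
)

// sizeCounter accumulates the number of bytes written through it.
type sizeCounter struct{ size int64 }

func (c *sizeCounter) Write(p []byte) (int, error) {
	c.size += int64(len(p))
	return len(p), nil
}

func main() {
	var counter sizeCounter
	stream := io.TeeReader(strings.NewReader("blob payload"), &counter)

	// Stand-in for the registry upload of the stream.
	if _, err := io.Copy(ioutil.Discard, stream); err != nil {
		panic(err)
	}
	fmt.Println("uploaded bytes:", counter.size) // 12
}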
// blobExists returns true iff repo contains a blob with digest, and if so, also its size.
@@ -485,7 +487,7 @@ func (d *dockerImageDestination) PutSignatures(ctx context.Context, signatures [
return nil
}
if instanceDigest == nil {
- if d.manifestDigest.String() == "" {
+ if d.manifestDigest == "" {
// This shouldn’t happen, ImageDestination users are required to call PutManifest before PutSignatures
return errors.Errorf("Unknown manifest digest, can't add signatures")
}
diff --git a/vendor/github.com/containers/image/v5/docker/docker_image_src.go b/vendor/github.com/containers/image/v5/docker/docker_image_src.go
index 5dc8e7b1f..1333cf9e2 100644
--- a/vendor/github.com/containers/image/v5/docker/docker_image_src.go
+++ b/vendor/github.com/containers/image/v5/docker/docker_image_src.go
@@ -278,7 +278,78 @@ func (s *dockerImageSource) HasThreadSafeGetBlob() bool {
return true
}
+// splitHTTP200ResponseToPartial splits a 200 response in multiple streams as specified by the chunks
+func splitHTTP200ResponseToPartial(streams chan io.ReadCloser, errs chan error, body io.ReadCloser, chunks []internalTypes.ImageSourceChunk) {
+ defer close(streams)
+ defer close(errs)
+ currentOffset := uint64(0)
+
+ body = makeBufferedNetworkReader(body, 64, 16384)
+ defer body.Close()
+ for _, c := range chunks {
+ if c.Offset != currentOffset {
+ if c.Offset < currentOffset {
+ errs <- fmt.Errorf("invalid chunk offset specified %v (expected >= %v)", c.Offset, currentOffset)
+ break
+ }
+ toSkip := c.Offset - currentOffset
+ if _, err := io.Copy(ioutil.Discard, io.LimitReader(body, int64(toSkip))); err != nil {
+ errs <- err
+ break
+ }
+ currentOffset += toSkip
+ }
+ s := signalCloseReader{
+ closed: make(chan interface{}),
+ stream: ioutil.NopCloser(io.LimitReader(body, int64(c.Length))),
+ consumeStream: true,
+ }
+ streams <- s
+
+ // Wait until the stream is closed before going to the next chunk
+ <-s.closed
+ currentOffset += c.Length
+ }
+}
+
+// handle206Response reads a 206 response and send each part as a separate ReadCloser to the streams chan.
+func handle206Response(streams chan io.ReadCloser, errs chan error, body io.ReadCloser, chunks []internalTypes.ImageSourceChunk, mediaType string, params map[string]string) {
+ defer close(streams)
+ defer close(errs)
+ if !strings.HasPrefix(mediaType, "multipart/") {
+ streams <- body
+ return
+ }
+ boundary, found := params["boundary"]
+ if !found {
+ errs <- errors.Errorf("could not find boundary")
+ body.Close()
+ return
+ }
+ buffered := makeBufferedNetworkReader(body, 64, 16384)
+ defer buffered.Close()
+ mr := multipart.NewReader(buffered, boundary)
+ for {
+ p, err := mr.NextPart()
+ if err != nil {
+ if err != io.EOF {
+ errs <- err
+ }
+ return
+ }
+ s := signalCloseReader{
+ closed: make(chan interface{}),
+ stream: p,
+ }
+ streams <- s
+ // NextPart() cannot be called while the current part
+ // is being read, so wait until it is closed
+ <-s.closed
+ }
+}
+
// GetBlobAt returns a stream for the specified blob.
+// The specified chunks must be not overlapping and sorted by their offset.
func (s *dockerImageSource) GetBlobAt(ctx context.Context, info types.BlobInfo, chunks []internalTypes.ImageSourceChunk) (chan io.ReadCloser, chan error, error) {
headers := make(map[string][]string)
@@ -305,53 +376,30 @@ func (s *dockerImageSource) GetBlobAt(ctx context.Context, info types.BlobInfo,
}
return nil, nil, err
}
- if res.StatusCode != http.StatusPartialContent {
- res.Body.Close()
- return nil, nil, errors.Errorf("invalid status code returned when fetching blob %d (%s)", res.StatusCode, http.StatusText(res.StatusCode))
- }
- mediaType, params, err := mime.ParseMediaType(res.Header.Get("Content-Type"))
- if err != nil {
- return nil, nil, err
- }
+ switch res.StatusCode {
+ case http.StatusOK:
+ // if the server replied with a 200 status code, convert the full body response to a series of
+ // streams as it would have been done with 206.
+ streams := make(chan io.ReadCloser)
+ errs := make(chan error)
+ go splitHTTP200ResponseToPartial(streams, errs, res.Body, chunks)
+ return streams, errs, nil
+ case http.StatusPartialContent:
+ mediaType, params, err := mime.ParseMediaType(res.Header.Get("Content-Type"))
+ if err != nil {
+ return nil, nil, err
+ }
- streams := make(chan io.ReadCloser)
- errs := make(chan error)
+ streams := make(chan io.ReadCloser)
+ errs := make(chan error)
- go func() {
- defer close(streams)
- defer close(errs)
- if !strings.HasPrefix(mediaType, "multipart/") {
- streams <- res.Body
- return
- }
- boundary, found := params["boundary"]
- if !found {
- errs <- errors.Errorf("could not find boundary")
- return
- }
- buffered := makeBufferedNetworkReader(res.Body, 64, 16384)
- defer buffered.Close()
- mr := multipart.NewReader(buffered, boundary)
- for {
- p, err := mr.NextPart()
- if err != nil {
- if err != io.EOF {
- errs <- err
- }
- return
- }
- s := signalCloseReader{
- Closed: make(chan interface{}),
- Stream: p,
- }
- streams <- s
- // NextPart() cannot be called while the current part
- // is being read, so wait until it is closed
- <-s.Closed
- }
- }()
- return streams, errs, nil
+ go handle206Response(streams, errs, res.Body, chunks, mediaType, params)
+ return streams, errs, nil
+ default:
+ res.Body.Close()
+ return nil, nil, errors.Errorf("invalid status code returned when fetching blob %d (%s)", res.StatusCode, http.StatusText(res.StatusCode))
+ }
}
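GetBlobAt now tolerates registries that ignore Range headers: a 200 response is sliced locally into per-chunk readers, while a genuine 206 with a multipart body is split via mime/multipart, as handle206Response does above. A hedged sketch of the 206 path using only the standard library (the URL and byte ranges are illustrative):

package main

import (
	"fmt"
	"io"
	"io/ioutil"
	"mime"
	"mime/multipart"
	"net/http"
)

func main() {
	req, _ := http.NewRequest(http.MethodGet, "https://registry.example/v2/blob", nil)
	req.Header.Set("Range", "bytes=0-9,100-199") // non-overlapping, sorted chunks

	res, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer res.Body.Close()

	if res.StatusCode != http.StatusPartialContent {
		panic(fmt.Sprintf("expected 206, got %d", res.StatusCode))
	}
	mediaType, params, err := mime.ParseMediaType(res.Header.Get("Content-Type"))
	if err != nil {
		panic(err)
	}
	if mediaType == "multipart/byteranges" {
		mr := multipart.NewReader(res.Body, params["boundary"])
		for {
			part, err := mr.NextPart()
			if err == io.EOF {
				break
			}
			if err != nil {
				panic(err)
			}
			n, _ := io.Copy(ioutil.Discard, part) // consume fully before NextPart
			fmt.Println("part bytes:", n)
		}
	}
}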
// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown).
@@ -585,7 +633,7 @@ type bufferedNetworkReaderBuffer struct {
}
type bufferedNetworkReader struct {
- stream io.Reader
+ stream io.ReadCloser
emptyBuffer chan *bufferedNetworkReaderBuffer
readyBuffer chan *bufferedNetworkReaderBuffer
terminate chan bool
@@ -611,9 +659,10 @@ func handleBufferedNetworkReader(br *bufferedNetworkReader) {
}
}
-func (n *bufferedNetworkReader) Close() {
+func (n *bufferedNetworkReader) Close() error {
close(n.terminate)
close(n.emptyBuffer)
+ return n.stream.Close()
}
func (n *bufferedNetworkReader) read(p []byte) (int, error) {
@@ -657,7 +706,7 @@ func (n *bufferedNetworkReader) Read(p []byte) (int, error) {
return n.read(p)
}
-func makeBufferedNetworkReader(stream io.Reader, nBuffers, bufferSize uint) *bufferedNetworkReader {
+func makeBufferedNetworkReader(stream io.ReadCloser, nBuffers, bufferSize uint) *bufferedNetworkReader {
br := bufferedNetworkReader{
stream: stream,
emptyBuffer: make(chan *bufferedNetworkReaderBuffer, nBuffers),
@@ -680,15 +729,22 @@ func makeBufferedNetworkReader(stream io.Reader, nBuffers, bufferSize uint) *buf
}
type signalCloseReader struct {
- Closed chan interface{}
- Stream io.ReadCloser
+ closed chan interface{}
+ stream io.ReadCloser
+ consumeStream bool
}
func (s signalCloseReader) Read(p []byte) (int, error) {
- return s.Stream.Read(p)
+ return s.stream.Read(p)
}
func (s signalCloseReader) Close() error {
- defer close(s.Closed)
- return s.Stream.Close()
+ defer close(s.closed)
+ if s.consumeStream {
+ if _, err := io.Copy(ioutil.Discard, s.stream); err != nil {
+ s.stream.Close()
+ return err
+ }
+ }
+ return s.stream.Close()
}
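signalCloseReader's fields go private and gain a consumeStream flag so that Close drains any unread remainder, keeping the shared 200-response body positioned for the next chunk. The close-signal pattern itself, in miniature:

package main

import (
	"fmt"
	"io"
	"io/ioutil"
	"strings"
)

type signalCloseReader struct {
	closed chan interface{}
	stream io.ReadCloser
}

func (s signalCloseReader) Read(p []byte) (int, error) { return s.stream.Read(p) }

func (s signalCloseReader) Close() error {
	defer close(s.closed) // lets the producer hand out the next chunk
	return s.stream.Close()
}

func main() {
	s := signalCloseReader{
		closed: make(chan interface{}),
		stream: ioutil.NopCloser(strings.NewReader("chunk")),
	}
	go func() {
		io.Copy(ioutil.Discard, s)
		s.Close()
	}()
	<-s.closed // the producer blocks here before emitting the next stream
	fmt.Println("chunk consumed and closed")
}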
diff --git a/vendor/github.com/containers/image/v5/docker/internal/tarfile/dest.go b/vendor/github.com/containers/image/v5/docker/internal/tarfile/dest.go
index a558657b6..44b0af110 100644
--- a/vendor/github.com/containers/image/v5/docker/internal/tarfile/dest.go
+++ b/vendor/github.com/containers/image/v5/docker/internal/tarfile/dest.go
@@ -10,6 +10,7 @@ import (
"github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/internal/iolimits"
+ "github.com/containers/image/v5/internal/putblobdigest"
"github.com/containers/image/v5/internal/tmpdir"
"github.com/containers/image/v5/manifest"
"github.com/containers/image/v5/types"
@@ -86,7 +87,7 @@ func (d *Destination) HasThreadSafePutBlob() bool {
}
// PutBlob writes contents of stream and returns data representing the result (with all data filled in).
-// inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it.
+// inputInfo.Digest can be optionally provided if known; if provided, and stream is read to the end without error, the digest MUST match the stream contents.
// inputInfo.Size is the expected length of stream, if known.
// May update cache.
// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
@@ -95,7 +96,7 @@ func (d *Destination) HasThreadSafePutBlob() bool {
func (d *Destination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) {
// Ouch, we need to stream the blob into a temporary file just to determine the size.
// When the layer is decompressed, we also have to generate the digest on uncompressed data.
- if inputInfo.Size == -1 || inputInfo.Digest.String() == "" {
+ if inputInfo.Size == -1 || inputInfo.Digest == "" {
logrus.Debugf("docker tarfile: input with unknown size, streaming to disk first ...")
streamCopy, err := ioutil.TempFile(tmpdir.TemporaryDirectoryForBigFiles(d.sysCtx), "docker-tarfile-blob")
if err != nil {
@@ -104,10 +105,9 @@ func (d *Destination) PutBlob(ctx context.Context, stream io.Reader, inputInfo t
defer os.Remove(streamCopy.Name())
defer streamCopy.Close()
- digester := digest.Canonical.Digester()
- tee := io.TeeReader(stream, digester.Hash())
+ digester, stream2 := putblobdigest.DigestIfUnknown(stream, inputInfo)
// TODO: This can take quite some time, and should ideally be cancellable using ctx.Done().
- size, err := io.Copy(streamCopy, tee)
+ size, err := io.Copy(streamCopy, stream2)
if err != nil {
return types.BlobInfo{}, err
}
@@ -116,9 +116,7 @@ func (d *Destination) PutBlob(ctx context.Context, stream io.Reader, inputInfo t
return types.BlobInfo{}, err
}
inputInfo.Size = size // inputInfo is a struct, so we are only modifying our copy.
- if inputInfo.Digest == "" {
- inputInfo.Digest = digester.Digest()
- }
+ inputInfo.Digest = digester.Digest()
stream = streamCopy
logrus.Debugf("... streaming done")
}
diff --git a/vendor/github.com/containers/image/v5/internal/pkg/keyctl/key.go b/vendor/github.com/containers/image/v5/internal/pkg/keyctl/key.go
index 88e123cdd..bf6cc87d4 100644
--- a/vendor/github.com/containers/image/v5/internal/pkg/keyctl/key.go
+++ b/vendor/github.com/containers/image/v5/internal/pkg/keyctl/key.go
@@ -2,6 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
+//go:build linux
// +build linux
package keyctl
diff --git a/vendor/github.com/containers/image/v5/internal/pkg/keyctl/keyring.go b/vendor/github.com/containers/image/v5/internal/pkg/keyctl/keyring.go
index 91c64a1b8..5eaad615c 100644
--- a/vendor/github.com/containers/image/v5/internal/pkg/keyctl/keyring.go
+++ b/vendor/github.com/containers/image/v5/internal/pkg/keyctl/keyring.go
@@ -2,6 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
+//go:build linux
// +build linux
// Package keyctl is a Go interface to linux kernel keyrings (keyctl interface)
diff --git a/vendor/github.com/containers/image/v5/internal/pkg/keyctl/perm.go b/vendor/github.com/containers/image/v5/internal/pkg/keyctl/perm.go
index ae9697149..5f4d2157a 100644
--- a/vendor/github.com/containers/image/v5/internal/pkg/keyctl/perm.go
+++ b/vendor/github.com/containers/image/v5/internal/pkg/keyctl/perm.go
@@ -2,6 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
+//go:build linux
// +build linux
package keyctl
diff --git a/vendor/github.com/containers/image/v5/internal/pkg/keyctl/sys_linux.go b/vendor/github.com/containers/image/v5/internal/pkg/keyctl/sys_linux.go
index 196c82760..f61666e42 100644
--- a/vendor/github.com/containers/image/v5/internal/pkg/keyctl/sys_linux.go
+++ b/vendor/github.com/containers/image/v5/internal/pkg/keyctl/sys_linux.go
@@ -2,6 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
+//go:build linux
// +build linux
package keyctl
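The four keyctl hunks are mechanical: Go 1.17's //go:build syntax is added above the legacy // +build comment (gofmt keeps the two in sync), leaving headers of the form:

//go:build linux
// +build linux

package keyctl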
diff --git a/vendor/github.com/containers/image/v5/internal/putblobdigest/put_blob_digest.go b/vendor/github.com/containers/image/v5/internal/putblobdigest/put_blob_digest.go
new file mode 100644
index 000000000..b8d3a7e56
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/internal/putblobdigest/put_blob_digest.go
@@ -0,0 +1,57 @@
+package putblobdigest
+
+import (
+ "io"
+
+ "github.com/containers/image/v5/types"
+ "github.com/opencontainers/go-digest"
+)
+
+// Digester computes a digest of the provided stream, if not known yet.
+type Digester struct {
+ knownDigest digest.Digest // Or ""
+ digester digest.Digester // Or nil
+}
+
+// newDigester initiates computation of a digest.Canonical digest of stream,
+// if !validDigest; otherwise it just records knownDigest to be returned later.
+// The caller MUST use the returned stream instead of the original value.
+func newDigester(stream io.Reader, knownDigest digest.Digest, validDigest bool) (Digester, io.Reader) {
+ if validDigest {
+ return Digester{knownDigest: knownDigest}, stream
+ } else {
+ res := Digester{
+ digester: digest.Canonical.Digester(),
+ }
+ stream = io.TeeReader(stream, res.digester.Hash())
+ return res, stream
+ }
+}
+
+// DigestIfUnknown initiates computation of a digest.Canonical digest of stream,
+// if no digest is supplied in the provided blobInfo; otherwise blobInfo.Digest will
+// be used (accepting any algorithm).
+// The caller MUST use the returned stream instead of the original value.
+func DigestIfUnknown(stream io.Reader, blobInfo types.BlobInfo) (Digester, io.Reader) {
+ d := blobInfo.Digest
+ return newDigester(stream, d, d != "")
+}
+
+// DigestIfCanonicalUnknown initiates computation of a digest.Canonical digest of stream,
+// if a digest.Canonical digest is not supplied in the provided blobInfo;
+// otherwise blobInfo.Digest will be used.
+// The caller MUST use the returned stream instead of the original value.
+func DigestIfCanonicalUnknown(stream io.Reader, blobInfo types.BlobInfo) (Digester, io.Reader) {
+ d := blobInfo.Digest
+ return newDigester(stream, d, d != "" && d.Algorithm() == digest.Canonical)
+}
+
+// Digest() returns a digest value possibly computed by Digester.
+// This must be called only after all of the stream returned by a Digester constructor
+// has been successfully read.
+func (d Digester) Digest() digest.Digest {
+ if d.digester != nil {
+ return d.digester.Digest()
+ }
+ return d.knownDigest
+}
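Destinations adopt this helper in place of an unconditional TeeReader: DigestIfCanonicalUnknown hashes only when the caller did not already supply a canonical digest. A usage sketch mirroring the directory destination (note putblobdigest is internal to containers/image, so this compiles only within that module; the blob payload stands in for a real stream):

package main

import (
	"fmt"
	"io"
	"io/ioutil"
	"strings"

	"github.com/containers/image/v5/internal/putblobdigest"
	"github.com/containers/image/v5/types"
)

func main() {
	inputInfo := types.BlobInfo{Digest: "", Size: -1} // digest unknown, so it gets computed
	var stream io.Reader = strings.NewReader("layer bytes")

	digester, stream := putblobdigest.DigestIfCanonicalUnknown(stream, inputInfo)
	size, err := io.Copy(ioutil.Discard, stream) // stand-in for writing blobFile
	if err != nil {
		panic(err)
	}
	fmt.Printf("stored %d bytes as %s\n", size, digester.Digest())
}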
diff --git a/vendor/github.com/containers/image/v5/internal/types/types.go b/vendor/github.com/containers/image/v5/internal/types/types.go
index e0355a477..388f8cf3b 100644
--- a/vendor/github.com/containers/image/v5/internal/types/types.go
+++ b/vendor/github.com/containers/image/v5/internal/types/types.go
@@ -70,6 +70,7 @@ type ImageSourceChunk struct {
// This API is experimental and can be changed without bumping the major version number.
type ImageSourceSeekable interface {
// GetBlobAt returns a stream for the specified blob.
+ // The specified chunks must be not overlapping and sorted by their offset.
GetBlobAt(context.Context, publicTypes.BlobInfo, []ImageSourceChunk) (chan io.ReadCloser, chan error, error)
}
diff --git a/vendor/github.com/containers/image/v5/oci/archive/oci_dest.go b/vendor/github.com/containers/image/v5/oci/archive/oci_dest.go
index 065a0b055..3d8738db5 100644
--- a/vendor/github.com/containers/image/v5/oci/archive/oci_dest.go
+++ b/vendor/github.com/containers/image/v5/oci/archive/oci_dest.go
@@ -88,7 +88,7 @@ func (d *ociArchiveImageDestination) HasThreadSafePutBlob() bool {
}
// PutBlob writes contents of stream and returns data representing the result.
-// inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it.
+// inputInfo.Digest can be optionally provided if known; if provided, and stream is read to the end without error, the digest MUST match the stream contents.
// inputInfo.Size is the expected length of stream, if known.
// inputInfo.MediaType describes the blob format, if known.
// May update cache.
diff --git a/vendor/github.com/containers/image/v5/oci/layout/oci_dest.go b/vendor/github.com/containers/image/v5/oci/layout/oci_dest.go
index d1d06d64d..d0ee72635 100644
--- a/vendor/github.com/containers/image/v5/oci/layout/oci_dest.go
+++ b/vendor/github.com/containers/image/v5/oci/layout/oci_dest.go
@@ -9,6 +9,7 @@ import (
"path/filepath"
"runtime"
+ "github.com/containers/image/v5/internal/putblobdigest"
"github.com/containers/image/v5/manifest"
"github.com/containers/image/v5/types"
digest "github.com/opencontainers/go-digest"
@@ -115,7 +116,7 @@ func (d *ociImageDestination) HasThreadSafePutBlob() bool {
}
// PutBlob writes contents of stream and returns data representing the result.
-// inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it.
+// inputInfo.Digest can be optionally provided if known; if provided, and stream is read to the end without error, the digest MUST match the stream contents.
// inputInfo.Size is the expected length of stream, if known.
// inputInfo.MediaType describes the blob format, if known.
// May update cache.
@@ -138,17 +139,15 @@ func (d *ociImageDestination) PutBlob(ctx context.Context, stream io.Reader, inp
}
}()
- digester := digest.Canonical.Digester()
- tee := io.TeeReader(stream, digester.Hash())
-
+ digester, stream := putblobdigest.DigestIfCanonicalUnknown(stream, inputInfo)
// TODO: This can take quite some time, and should ideally be cancellable using ctx.Done().
- size, err := io.Copy(blobFile, tee)
+ size, err := io.Copy(blobFile, stream)
if err != nil {
return types.BlobInfo{}, err
}
- computedDigest := digester.Digest()
+ blobDigest := digester.Digest()
if inputInfo.Size != -1 && size != inputInfo.Size {
- return types.BlobInfo{}, errors.Errorf("Size mismatch when copying %s, expected %d, got %d", computedDigest, inputInfo.Size, size)
+ return types.BlobInfo{}, errors.Errorf("Size mismatch when copying %s, expected %d, got %d", blobDigest, inputInfo.Size, size)
}
if err := blobFile.Sync(); err != nil {
return types.BlobInfo{}, err
@@ -164,7 +163,7 @@ func (d *ociImageDestination) PutBlob(ctx context.Context, stream io.Reader, inp
}
}
- blobPath, err := d.ref.blobPath(computedDigest, d.sharedBlobDir)
+ blobPath, err := d.ref.blobPath(blobDigest, d.sharedBlobDir)
if err != nil {
return types.BlobInfo{}, err
}
@@ -179,7 +178,7 @@ func (d *ociImageDestination) PutBlob(ctx context.Context, stream io.Reader, inp
return types.BlobInfo{}, err
}
succeeded = true
- return types.BlobInfo{Digest: computedDigest, Size: size}, nil
+ return types.BlobInfo{Digest: blobDigest, Size: size}, nil
}
// TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
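The putblobdigest helper itself is not shown in this hunk (it lives in internal/putblobdigest). A plausible reading of its contract, based on the call sites above: reuse the caller's digest when it already uses the canonical algorithm, otherwise tee the stream through a fresh digester. A minimal sketch under that assumption, not the vendored source:

    // Sketch of the contract implied by the call sites.
    type blobDigester struct {
        known    digest.Digest   // non-"" when the caller's digest is trusted
        digester digest.Digester // non-nil when we computed one ourselves
    }

    func (d blobDigester) Digest() digest.Digest {
        if d.known != "" {
            return d.known
        }
        return d.digester.Digest()
    }

    func digestIfCanonicalUnknown(stream io.Reader, info types.BlobInfo) (blobDigester, io.Reader) {
        if info.Digest != "" && info.Digest.Algorithm() == digest.Canonical {
            return blobDigester{known: info.Digest}, stream
        }
        d := digest.Canonical.Digester()
        return blobDigester{digester: d}, io.TeeReader(stream, d.Hash())
    }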
diff --git a/vendor/github.com/containers/image/v5/openshift/openshift-copies.go b/vendor/github.com/containers/image/v5/openshift/openshift-copies.go
index f9f811784..4ffbced6b 100644
--- a/vendor/github.com/containers/image/v5/openshift/openshift-copies.go
+++ b/vendor/github.com/containers/image/v5/openshift/openshift-copies.go
@@ -279,7 +279,7 @@ func getUserIdentificationPartialConfig(configAuthInfo clientcmdAuthInfo) (*rest
}
// ConfirmUsable is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.DirectClientConfig.ConfirmUsable.
-// ConfirmUsable looks a particular context and determines if that particular part of the config is useable. There might still be errors in the config,
+// ConfirmUsable looks at a particular context and determines if that particular part of the config is usable. There might still be errors in the config,
// but no errors in the sections requested or referenced. It does not return early so that it can find as many errors as possible.
func (config *directClientConfig) ConfirmUsable() error {
var validationErrors []error
diff --git a/vendor/github.com/containers/image/v5/openshift/openshift.go b/vendor/github.com/containers/image/v5/openshift/openshift.go
index 6ea65bcf3..c7c6cf694 100644
--- a/vendor/github.com/containers/image/v5/openshift/openshift.go
+++ b/vendor/github.com/containers/image/v5/openshift/openshift.go
@@ -395,7 +395,7 @@ func (d *openshiftImageDestination) HasThreadSafePutBlob() bool {
}
// PutBlob writes contents of stream and returns data representing the result (with all data filled in).
-// inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it.
+// inputInfo.Digest can be optionally provided if known; if provided, and stream is read to the end without error, the digest MUST match the stream contents.
// inputInfo.Size is the expected length of stream, if known.
// May update cache.
// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
diff --git a/vendor/github.com/containers/image/v5/ostree/ostree_dest.go b/vendor/github.com/containers/image/v5/ostree/ostree_dest.go
index c91a49c57..3eb2a2cba 100644
--- a/vendor/github.com/containers/image/v5/ostree/ostree_dest.go
+++ b/vendor/github.com/containers/image/v5/ostree/ostree_dest.go
@@ -1,3 +1,4 @@
+//go:build containers_image_ostree
// +build containers_image_ostree
package ostree
@@ -20,6 +21,7 @@ import (
"time"
"unsafe"
+ "github.com/containers/image/v5/internal/putblobdigest"
"github.com/containers/image/v5/manifest"
"github.com/containers/image/v5/types"
"github.com/containers/storage/pkg/archive"
@@ -138,7 +140,7 @@ func (d *ostreeImageDestination) HasThreadSafePutBlob() bool {
}
// PutBlob writes contents of stream and returns data representing the result.
-// inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it.
+// inputInfo.Digest can be optionally provided if known; if provided, and stream is read to the end without error, the digest MUST match the stream contents.
// inputInfo.Size is the expected length of stream, if known.
// inputInfo.MediaType describes the blob format, if known.
// May update cache.
@@ -158,25 +160,23 @@ func (d *ostreeImageDestination) PutBlob(ctx context.Context, stream io.Reader,
}
defer blobFile.Close()
- digester := digest.Canonical.Digester()
- tee := io.TeeReader(stream, digester.Hash())
-
+ digester, stream := putblobdigest.DigestIfCanonicalUnknown(stream, inputInfo)
// TODO: This can take quite some time, and should ideally be cancellable using ctx.Done().
- size, err := io.Copy(blobFile, tee)
+ size, err := io.Copy(blobFile, stream)
if err != nil {
return types.BlobInfo{}, err
}
- computedDigest := digester.Digest()
+ blobDigest := digester.Digest()
if inputInfo.Size != -1 && size != inputInfo.Size {
- return types.BlobInfo{}, errors.Errorf("Size mismatch when copying %s, expected %d, got %d", computedDigest, inputInfo.Size, size)
+ return types.BlobInfo{}, errors.Errorf("Size mismatch when copying %s, expected %d, got %d", blobDigest, inputInfo.Size, size)
}
if err := blobFile.Sync(); err != nil {
return types.BlobInfo{}, err
}
- hash := computedDigest.Hex()
- d.blobs[hash] = &blobToImport{Size: size, Digest: computedDigest, BlobPath: blobPath}
- return types.BlobInfo{Digest: computedDigest, Size: size}, nil
+ hash := blobDigest.Hex()
+ d.blobs[hash] = &blobToImport{Size: size, Digest: blobDigest, BlobPath: blobPath}
+ return types.BlobInfo{Digest: blobDigest, Size: size}, nil
}
func fixFiles(selinuxHnd *C.struct_selabel_handle, root string, dir string, usermode bool) error {
diff --git a/vendor/github.com/containers/image/v5/ostree/ostree_src.go b/vendor/github.com/containers/image/v5/ostree/ostree_src.go
index 4948ec664..d30c764a6 100644
--- a/vendor/github.com/containers/image/v5/ostree/ostree_src.go
+++ b/vendor/github.com/containers/image/v5/ostree/ostree_src.go
@@ -1,3 +1,4 @@
+//go:build containers_image_ostree
// +build containers_image_ostree
package ostree
diff --git a/vendor/github.com/containers/image/v5/ostree/ostree_transport.go b/vendor/github.com/containers/image/v5/ostree/ostree_transport.go
index a55147b85..1e35ab605 100644
--- a/vendor/github.com/containers/image/v5/ostree/ostree_transport.go
+++ b/vendor/github.com/containers/image/v5/ostree/ostree_transport.go
@@ -1,3 +1,4 @@
+//go:build containers_image_ostree
// +build containers_image_ostree
package ostree
diff --git a/vendor/github.com/containers/image/v5/pkg/docker/config/config.go b/vendor/github.com/containers/image/v5/pkg/docker/config/config.go
index c82a9e1a0..e37f4c19e 100644
--- a/vendor/github.com/containers/image/v5/pkg/docker/config/config.go
+++ b/vendor/github.com/containers/image/v5/pkg/docker/config/config.go
@@ -478,7 +478,7 @@ func listAuthsFromCredHelper(credHelper string) (map[string]string, error) {
return helperclient.List(p)
}
-// getPathToAuth gets the path of the auth.json file used for reading and writting credentials
+// getPathToAuth gets the path of the auth.json file used for reading and writing credentials
// returns the path, and a bool specifying whether the file is in the legacy format
func getPathToAuth(sys *types.SystemContext) (string, bool, error) {
return getPathToAuthWithOS(sys, runtime.GOOS)
@@ -601,10 +601,18 @@ func getAuthFromCredHelper(credHelper, registry string) (types.DockerAuthConfig,
if err != nil {
return types.DockerAuthConfig{}, err
}
- return types.DockerAuthConfig{
- Username: creds.Username,
- Password: creds.Secret,
- }, nil
+
+ switch creds.Username {
+ case "<token>":
+ return types.DockerAuthConfig{
+ IdentityToken: creds.Secret,
+ }, nil
+ default:
+ return types.DockerAuthConfig{
+ Username: creds.Username,
+ Password: creds.Secret,
+ }, nil
+ }
}
func setAuthToCredHelper(credHelper, registry, username, password string) error {
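Docker credential helpers have no dedicated field for identity (OAuth refresh) tokens, so the convention is to store the token as the secret under the literal username "<token>"; the new switch above maps that back onto DockerAuthConfig.IdentityToken. A sketch of the producer side of the convention, using the docker-credential-helpers client API; the helper name is illustrative and identityToken is assumed to be in scope:

    // Sketch: store an identity token so that getAuthFromCredHelper above
    // returns DockerAuthConfig{IdentityToken: ...} rather than user/password.
    p := helperclient.NewShellProgramFunc("docker-credential-example")
    creds := &credentials.Credentials{
        ServerURL: "registry.example.com",
        Username:  "<token>",     // sentinel: Secret holds an identity token
        Secret:    identityToken, // not a password
    }
    if err := helperclient.Store(p, creds); err != nil {
        return err
    }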
diff --git a/vendor/github.com/containers/image/v5/pkg/docker/config/config_linux.go b/vendor/github.com/containers/image/v5/pkg/docker/config/config_linux.go
index 1354ee46d..0bf161259 100644
--- a/vendor/github.com/containers/image/v5/pkg/docker/config/config_linux.go
+++ b/vendor/github.com/containers/image/v5/pkg/docker/config/config_linux.go
@@ -10,7 +10,7 @@ import (
)
// NOTE: none of the functions here are currently used. If we ever want to
-// reenable keyring support, we should introduce a similar built-in credential
+// re-enable keyring support, we should introduce a similar built-in credential
// helpers as for `sysregistriesv2.AuthenticationFileHelper`.
const keyDescribePrefix = "container-registry-login:" //nolint:deadcode,unused
diff --git a/vendor/github.com/containers/image/v5/pkg/docker/config/config_unsupported.go b/vendor/github.com/containers/image/v5/pkg/docker/config/config_unsupported.go
index 65e580410..d9827d8ed 100644
--- a/vendor/github.com/containers/image/v5/pkg/docker/config/config_unsupported.go
+++ b/vendor/github.com/containers/image/v5/pkg/docker/config/config_unsupported.go
@@ -1,3 +1,4 @@
+//go:build !linux && (!386 || !amd64)
// +build !linux
// +build !386 !amd64
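These header hunks all come from the Go 1.17 gofmt migration, which adds the new //go:build form while keeping the legacy // +build lines for older toolchains. The translation is mechanical, as this file shows: separate +build lines AND together, while space-separated terms on one line OR together.

    // Legacy form (two lines AND together; terms within a line OR together):
    //   // +build !linux
    //   // +build !386 !amd64
    // Go 1.17 single-expression form:
    //   //go:build !linux && (!386 || !amd64)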
diff --git a/vendor/github.com/containers/image/v5/signature/mechanism_gpgme.go b/vendor/github.com/containers/image/v5/signature/mechanism_gpgme.go
index a0afc34b4..6ae74d430 100644
--- a/vendor/github.com/containers/image/v5/signature/mechanism_gpgme.go
+++ b/vendor/github.com/containers/image/v5/signature/mechanism_gpgme.go
@@ -1,3 +1,4 @@
+//go:build !containers_image_openpgp
// +build !containers_image_openpgp
package signature
diff --git a/vendor/github.com/containers/image/v5/signature/mechanism_openpgp.go b/vendor/github.com/containers/image/v5/signature/mechanism_openpgp.go
index a05760284..0a09788f9 100644
--- a/vendor/github.com/containers/image/v5/signature/mechanism_openpgp.go
+++ b/vendor/github.com/containers/image/v5/signature/mechanism_openpgp.go
@@ -1,3 +1,4 @@
+//go:build containers_image_openpgp
// +build containers_image_openpgp
package signature
diff --git a/vendor/github.com/containers/image/v5/storage/storage_image.go b/vendor/github.com/containers/image/v5/storage/storage_image.go
index 6b0fea61a..7329ef6ee 100644
--- a/vendor/github.com/containers/image/v5/storage/storage_image.go
+++ b/vendor/github.com/containers/image/v5/storage/storage_image.go
@@ -1,3 +1,4 @@
+//go:build !containers_image_storage_stub
// +build !containers_image_storage_stub
package storage
@@ -17,13 +18,14 @@ import (
"github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/image"
+ "github.com/containers/image/v5/internal/putblobdigest"
"github.com/containers/image/v5/internal/tmpdir"
internalTypes "github.com/containers/image/v5/internal/types"
"github.com/containers/image/v5/manifest"
"github.com/containers/image/v5/pkg/blobinfocache/none"
"github.com/containers/image/v5/types"
"github.com/containers/storage"
- "github.com/containers/storage/drivers"
+ graphdriver "github.com/containers/storage/drivers"
"github.com/containers/storage/pkg/archive"
"github.com/containers/storage/pkg/chunked"
"github.com/containers/storage/pkg/ioutils"
@@ -34,8 +36,10 @@ import (
)
var (
- // ErrBlobDigestMismatch is returned when PutBlob() is given a blob
+ // ErrBlobDigestMismatch could potentially be returned when PutBlob() is given a blob
// with a digest-based name that doesn't match its contents.
+ // Deprecated: PutBlob() doesn't do this any more (it just accepts the caller’s value),
+ // and there is no known user of this error.
ErrBlobDigestMismatch = stderrors.New("blob digest mismatch")
// ErrBlobSizeMismatch is returned when PutBlob() is given a blob
// with an expected size that doesn't match the reader.
@@ -468,7 +472,7 @@ func (s *storageImageDestination) HasThreadSafePutBlob() bool {
}
// PutBlob writes contents of stream and returns data representing the result.
-// inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it.
+// inputInfo.Digest can be optionally provided if known; if provided, and stream is read to the end without error, the digest MUST match the stream contents.
// inputInfo.Size is the expected length of stream, if known.
// inputInfo.MediaType describes the blob format, if known.
// May update cache.
@@ -482,26 +486,28 @@ func (s *storageImageDestination) PutBlob(ctx context.Context, stream io.Reader,
Digest: "",
Size: -1,
}
- // Set up to digest the blob and count its size while saving it to a file.
- hasher := digest.Canonical.Digester()
- if blobinfo.Digest.Validate() == nil {
- if a := blobinfo.Digest.Algorithm(); a.Available() {
- hasher = a.Digester()
+ if blobinfo.Digest != "" {
+ if err := blobinfo.Digest.Validate(); err != nil {
+ return errorBlobInfo, fmt.Errorf("invalid digest %#v: %w", blobinfo.Digest.String(), err)
}
}
- diffID := digest.Canonical.Digester()
+
+ // Set up to digest the blob if necessary, and count its size while saving it to a file.
filename := s.computeNextBlobCacheFile()
file, err := os.OpenFile(filename, os.O_CREATE|os.O_TRUNC|os.O_WRONLY|os.O_EXCL, 0600)
if err != nil {
return errorBlobInfo, errors.Wrapf(err, "creating temporary file %q", filename)
}
defer file.Close()
- counter := ioutils.NewWriteCounter(hasher.Hash())
- reader := io.TeeReader(io.TeeReader(stream, counter), file)
- decompressed, err := archive.DecompressStream(reader)
+ counter := ioutils.NewWriteCounter(file)
+ stream = io.TeeReader(stream, counter)
+ digester, stream := putblobdigest.DigestIfUnknown(stream, blobinfo)
+ decompressed, err := archive.DecompressStream(stream)
if err != nil {
return errorBlobInfo, errors.Wrap(err, "setting up to decompress blob")
}
+
+ diffID := digest.Canonical.Digester()
// Copy the data to the file.
// TODO: This can take quite some time, and should ideally be cancellable using ctx.Done().
_, err = io.Copy(diffID.Hash(), decompressed)
@@ -509,28 +515,25 @@ func (s *storageImageDestination) PutBlob(ctx context.Context, stream io.Reader,
if err != nil {
return errorBlobInfo, errors.Wrapf(err, "storing blob to file %q", filename)
}
- // Ensure that any information that we were given about the blob is correct.
- if blobinfo.Digest.Validate() == nil && blobinfo.Digest != hasher.Digest() {
- return errorBlobInfo, errors.WithStack(ErrBlobDigestMismatch)
- }
- if blobinfo.Size >= 0 && blobinfo.Size != counter.Count {
+
+ // Determine blob properties, and fail if information that we were given about the blob
+ // is known to be incorrect.
+ blobDigest := digester.Digest()
+ blobSize := blobinfo.Size
+ if blobSize < 0 {
+ blobSize = counter.Count
+ } else if blobinfo.Size != counter.Count {
return errorBlobInfo, errors.WithStack(ErrBlobSizeMismatch)
}
+
// Record information about the blob.
s.lock.Lock()
- s.blobDiffIDs[hasher.Digest()] = diffID.Digest()
- s.fileSizes[hasher.Digest()] = counter.Count
- s.filenames[hasher.Digest()] = filename
+ s.blobDiffIDs[blobDigest] = diffID.Digest()
+ s.fileSizes[blobDigest] = counter.Count
+ s.filenames[blobDigest] = filename
s.lock.Unlock()
- blobDigest := blobinfo.Digest
- if blobDigest.Validate() != nil {
- blobDigest = hasher.Digest()
- }
- blobSize := blobinfo.Size
- if blobSize < 0 {
- blobSize = counter.Count
- }
- // This is safe because we have just computed both values ourselves.
+ // This is safe because we have just computed diffID, and blobDigest was either computed
+ // by us, or validated by the caller (usually copy.digestingReader).
cache.RecordDigestUncompressedPair(blobDigest, diffID.Digest())
return types.BlobInfo{
Digest: blobDigest,
@@ -813,7 +816,7 @@ func (s *storageImageDestination) queueOrCommit(ctx context.Context, blob types.
//
// The conceptual benefit of this design is that the caller can continue
// pulling layers after an early return. At any given time, only one
- // caller is the "worker" routine comitting layers. All other routines
+ // caller is the "worker" routine committing layers. All other routines
// can continue pulling and queuing in layers.
s.lock.Lock()
s.indexToPulledLayerInfo[index] = &manifest.LayerInfo{
@@ -852,7 +855,7 @@ func (s *storageImageDestination) queueOrCommit(ctx context.Context, blob types.
// must guarantee that, at any given time, at most one goroutine may execute
// `commitLayer()`.
func (s *storageImageDestination) commitLayer(ctx context.Context, blob manifest.LayerInfo, index int) error {
- // Already commited? Return early.
+ // Already committed? Return early.
if _, alreadyCommitted := s.indexToStorageID[index]; alreadyCommitted {
return nil
}
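queueOrCommit and commitLayer together implement the single-worker, in-order commit described in the comment above: any goroutine may record a pulled layer at its index, but only the one that completes the next expected index becomes the worker that drains the ready prefix. A self-contained sketch of that shape (illustrative names and payload type, not the vendored code; assumes import "sync"):

    type committer struct {
        mu        sync.Mutex
        queued    map[int]string // index -> pulled layer payload (illustrative)
        committed int            // length of the already-committed prefix
    }

    func (c *committer) queueOrCommit(index int, layer string, commit func(string) error) error {
        c.mu.Lock()
        c.queued[index] = layer
        if index != c.committed {
            c.mu.Unlock() // not our turn; the current worker commits this in order
            return nil
        }
        for { // we are now the single worker: drain the ready prefix
            l, ok := c.queued[c.committed]
            if !ok {
                c.mu.Unlock()
                return nil
            }
            c.mu.Unlock() // commit outside the lock so other pullers keep queuing
            if err := commit(l); err != nil {
                return err
            }
            c.mu.Lock()
            c.committed++
        }
    }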
@@ -1004,7 +1007,10 @@ func (s *storageImageDestination) commitLayer(ctx context.Context, blob manifest
defer file.Close()
// Build the new layer using the diff, regardless of where it came from.
// TODO: This can take quite some time, and should ideally be cancellable using ctx.Done().
- layer, _, err := s.imageRef.transport.store.PutLayer(id, lastLayer, nil, "", false, nil, file)
+ layer, _, err := s.imageRef.transport.store.PutLayer(id, lastLayer, nil, "", false, &storage.LayerOptions{
+ OriginalDigest: blob.Digest,
+ UncompressedDigest: diffID,
+ }, file)
if err != nil && errors.Cause(err) != storage.ErrDuplicateID {
return errors.Wrapf(err, "adding layer with blob %q", blob.Digest)
}
@@ -1065,7 +1071,7 @@ func (s *storageImageDestination) Commit(ctx context.Context, unparsedToplevel t
if len(layerBlobs) > 0 { // Can happen when using caches
prev := s.indexToStorageID[len(layerBlobs)-1]
if prev == nil {
- return errors.Errorf("Internal error: StorageImageDestination.Commit(): previous layer %d hasn't been commited (lastLayer == nil)", len(layerBlobs)-1)
+ return errors.Errorf("Internal error: StorageImageDestination.Commit(): previous layer %d hasn't been committed (lastLayer == nil)", len(layerBlobs)-1)
}
lastLayer = *prev
}
diff --git a/vendor/github.com/containers/image/v5/storage/storage_reference.go b/vendor/github.com/containers/image/v5/storage/storage_reference.go
index 1aafe9068..7c6da112c 100644
--- a/vendor/github.com/containers/image/v5/storage/storage_reference.go
+++ b/vendor/github.com/containers/image/v5/storage/storage_reference.go
@@ -1,3 +1,4 @@
+//go:build !containers_image_storage_stub
// +build !containers_image_storage_stub
package storage
diff --git a/vendor/github.com/containers/image/v5/storage/storage_transport.go b/vendor/github.com/containers/image/v5/storage/storage_transport.go
index d4c85b725..ab59c8a29 100644
--- a/vendor/github.com/containers/image/v5/storage/storage_transport.go
+++ b/vendor/github.com/containers/image/v5/storage/storage_transport.go
@@ -1,3 +1,4 @@
+//go:build !containers_image_storage_stub
// +build !containers_image_storage_stub
package storage
diff --git a/vendor/github.com/containers/image/v5/transports/alltransports/docker_daemon.go b/vendor/github.com/containers/image/v5/transports/alltransports/docker_daemon.go
index 82224052e..ffac6e0b8 100644
--- a/vendor/github.com/containers/image/v5/transports/alltransports/docker_daemon.go
+++ b/vendor/github.com/containers/image/v5/transports/alltransports/docker_daemon.go
@@ -1,3 +1,4 @@
+//go:build !containers_image_docker_daemon_stub
// +build !containers_image_docker_daemon_stub
package alltransports
diff --git a/vendor/github.com/containers/image/v5/transports/alltransports/docker_daemon_stub.go b/vendor/github.com/containers/image/v5/transports/alltransports/docker_daemon_stub.go
index d13700799..ddc347bf3 100644
--- a/vendor/github.com/containers/image/v5/transports/alltransports/docker_daemon_stub.go
+++ b/vendor/github.com/containers/image/v5/transports/alltransports/docker_daemon_stub.go
@@ -1,3 +1,4 @@
+//go:build containers_image_docker_daemon_stub
// +build containers_image_docker_daemon_stub
package alltransports
diff --git a/vendor/github.com/containers/image/v5/transports/alltransports/ostree.go b/vendor/github.com/containers/image/v5/transports/alltransports/ostree.go
index 72432d1ef..2340702bd 100644
--- a/vendor/github.com/containers/image/v5/transports/alltransports/ostree.go
+++ b/vendor/github.com/containers/image/v5/transports/alltransports/ostree.go
@@ -1,3 +1,4 @@
+//go:build containers_image_ostree && linux
// +build containers_image_ostree,linux
package alltransports
diff --git a/vendor/github.com/containers/image/v5/transports/alltransports/ostree_stub.go b/vendor/github.com/containers/image/v5/transports/alltransports/ostree_stub.go
index f4a862bd4..8c4175188 100644
--- a/vendor/github.com/containers/image/v5/transports/alltransports/ostree_stub.go
+++ b/vendor/github.com/containers/image/v5/transports/alltransports/ostree_stub.go
@@ -1,3 +1,4 @@
+//go:build !containers_image_ostree || !linux
// +build !containers_image_ostree !linux
package alltransports
diff --git a/vendor/github.com/containers/image/v5/transports/alltransports/storage.go b/vendor/github.com/containers/image/v5/transports/alltransports/storage.go
index 7041eb876..1e399cdb0 100644
--- a/vendor/github.com/containers/image/v5/transports/alltransports/storage.go
+++ b/vendor/github.com/containers/image/v5/transports/alltransports/storage.go
@@ -1,3 +1,4 @@
+//go:build !containers_image_storage_stub
// +build !containers_image_storage_stub
package alltransports
diff --git a/vendor/github.com/containers/image/v5/transports/alltransports/storage_stub.go b/vendor/github.com/containers/image/v5/transports/alltransports/storage_stub.go
index 67f0291cc..30802661f 100644
--- a/vendor/github.com/containers/image/v5/transports/alltransports/storage_stub.go
+++ b/vendor/github.com/containers/image/v5/transports/alltransports/storage_stub.go
@@ -1,3 +1,4 @@
+//go:build containers_image_storage_stub
// +build containers_image_storage_stub
package alltransports
diff --git a/vendor/github.com/containers/image/v5/types/types.go b/vendor/github.com/containers/image/v5/types/types.go
index 1c4a1419f..354b3f663 100644
--- a/vendor/github.com/containers/image/v5/types/types.go
+++ b/vendor/github.com/containers/image/v5/types/types.go
@@ -299,7 +299,7 @@ type ImageDestination interface {
IgnoresEmbeddedDockerReference() bool
// PutBlob writes contents of stream and returns data representing the result.
- // inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it.
+ // inputInfo.Digest can be optionally provided if known; if provided, and stream is read to the end without error, the digest MUST match the stream contents.
// inputInfo.Size is the expected length of stream, if known.
// inputInfo.MediaType describes the blob format, if known.
// May update cache.
diff --git a/vendor/github.com/containers/image/v5/version/version.go b/vendor/github.com/containers/image/v5/version/version.go
index 478a03b05..b9f8c3e9f 100644
--- a/vendor/github.com/containers/image/v5/version/version.go
+++ b/vendor/github.com/containers/image/v5/version/version.go
@@ -6,9 +6,9 @@ const (
// VersionMajor is for an API incompatible changes
VersionMajor = 5
// VersionMinor is for functionality in a backwards-compatible manner
- VersionMinor = 15
+ VersionMinor = 16
// VersionPatch is for backwards-compatible bug fixes
- VersionPatch = 2
+ VersionPatch = 0
// VersionDev indicates development branch. Releases will be empty string.
VersionDev = ""
diff --git a/vendor/github.com/containers/storage/VERSION b/vendor/github.com/containers/storage/VERSION
index a95a46d9f..2aeaa11ee 100644
--- a/vendor/github.com/containers/storage/VERSION
+++ b/vendor/github.com/containers/storage/VERSION
@@ -1 +1 @@
-1.34.1
+1.35.0
diff --git a/vendor/github.com/containers/storage/go.mod b/vendor/github.com/containers/storage/go.mod
index d2d438d93..10204a12a 100644
--- a/vendor/github.com/containers/storage/go.mod
+++ b/vendor/github.com/containers/storage/go.mod
@@ -16,7 +16,7 @@ require (
github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible
github.com/moby/sys/mountinfo v0.4.1
github.com/opencontainers/go-digest v1.0.0
- github.com/opencontainers/runc v1.0.1
+ github.com/opencontainers/runc v1.0.2
github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417
github.com/opencontainers/selinux v1.8.4
github.com/pkg/errors v0.9.1
@@ -25,8 +25,8 @@ require (
github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635
github.com/tchap/go-patricia v2.3.0+incompatible
github.com/ulikunitz/xz v0.5.10
- github.com/vbatts/tar-split v0.11.1
+ github.com/vbatts/tar-split v0.11.2
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110
- golang.org/x/sys v0.0.0-20210426230700-d19ff857e887
+ golang.org/x/sys v0.0.0-20210820121016-41cdb8703e55
gotest.tools v2.2.0+incompatible
)
diff --git a/vendor/github.com/containers/storage/go.sum b/vendor/github.com/containers/storage/go.sum
index da7a8f53e..1f5be8df5 100644
--- a/vendor/github.com/containers/storage/go.sum
+++ b/vendor/github.com/containers/storage/go.sum
@@ -469,8 +469,8 @@ github.com/opencontainers/runc v0.1.1/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59P
github.com/opencontainers/runc v1.0.0-rc8.0.20190926000215-3e425f80a8c9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
github.com/opencontainers/runc v1.0.0-rc9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
github.com/opencontainers/runc v1.0.0-rc93/go.mod h1:3NOsor4w32B2tC0Zbl8Knk4Wg84SM2ImC1fxBuqJ/H0=
-github.com/opencontainers/runc v1.0.1 h1:G18PGckGdAm3yVQRWDVQ1rLSLntiniKJ0cNRT2Tm5gs=
-github.com/opencontainers/runc v1.0.1/go.mod h1:aTaHFFwQXuA71CiyxOdFFIorAoemI04suvGRQFzWTD0=
+github.com/opencontainers/runc v1.0.2 h1:opHZMaswlyxz1OuGpBE53Dwe4/xF7EZTY0A2L/FpCOg=
+github.com/opencontainers/runc v1.0.2/go.mod h1:aTaHFFwQXuA71CiyxOdFFIorAoemI04suvGRQFzWTD0=
github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
github.com/opencontainers/runtime-spec v1.0.1/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
github.com/opencontainers/runtime-spec v1.0.2-0.20190207185410-29686dbc5559/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
@@ -585,8 +585,9 @@ github.com/urfave/cli v0.0.0-20171014202726-7bc6a0acffa5/go.mod h1:70zkFmudgCuE/
github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
-github.com/vbatts/tar-split v0.11.1 h1:0Odu65rhcZ3JZaPHxl7tCI3V/C/Q9Zf82UFravl02dE=
-github.com/vbatts/tar-split v0.11.1/go.mod h1:LEuURwDEiWjRjwu46yU3KVGuUdVv/dcnpcEPSzR8z6g=
+github.com/urfave/cli v1.22.4/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
+github.com/vbatts/tar-split v0.11.2 h1:Via6XqJr0hceW4wff3QRzD5gAk/tatMw/4ZA7cTlIME=
+github.com/vbatts/tar-split v0.11.2/go.mod h1:vV3ZuO2yWSVsz+pfFzDG/upWH1JhjOiEaWq6kXyQ3VI=
github.com/vishvananda/netlink v0.0.0-20181108222139-023a6dafdcdf/go.mod h1:+SR5DhBJrl6ZM7CoCKvpw5BKroDKQ+PJqOg65H/2ktk=
github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE=
github.com/vishvananda/netlink v1.1.1-0.20201029203352-d40f9887b852/go.mod h1:twkDnbuQxJYemMlGd4JFIcuhgX83tXhKS2B/PRMpOho=
@@ -770,8 +771,9 @@ golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20201202213521-69691e467435/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210324051608-47abb6519492/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210426230700-d19ff857e887 h1:dXfMednGJh/SUUFjTLsWJz3P+TQt9qnR11GgeI3vWKs=
golang.org/x/sys v0.0.0-20210426230700-d19ff857e887/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210820121016-41cdb8703e55 h1:rw6UNGRMfarCepjI8qOepea/SXwIBVfTKjztZ5gBbq4=
+golang.org/x/sys v0.0.0-20210820121016-41cdb8703e55/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
diff --git a/vendor/github.com/containers/storage/layers.go b/vendor/github.com/containers/storage/layers.go
index b85ff7e70..32ba20685 100644
--- a/vendor/github.com/containers/storage/layers.go
+++ b/vendor/github.com/containers/storage/layers.go
@@ -803,7 +803,7 @@ func (r *layerStore) Put(id string, parentLayer *Layer, names []string, mountLab
r.driver.Remove(id)
return nil, -1, err
}
- size, err = r.ApplyDiff(layer.ID, diff)
+ size, err = r.applyDiffWithOptions(layer.ID, moreOptions, diff)
if err != nil {
if r.Delete(layer.ID) != nil {
// Either a driver error or an error saving.
@@ -1505,6 +1505,10 @@ func (r *layerStore) DiffSize(from, to string) (size int64, err error) {
}
func (r *layerStore) ApplyDiff(to string, diff io.Reader) (size int64, err error) {
+ return r.applyDiffWithOptions(to, nil, diff)
+}
+
+func (r *layerStore) applyDiffWithOptions(to string, layerOptions *LayerOptions, diff io.Reader) (size int64, err error) {
if !r.IsReadWrite() {
return -1, errors.Wrapf(ErrStoreIsReadOnly, "not allowed to modify layer contents at %q", r.layerspath())
}
@@ -1519,11 +1523,33 @@ func (r *layerStore) ApplyDiff(to string, diff io.Reader) (size int64, err error
if err != nil && err != io.EOF {
return -1, err
}
-
compression := archive.DetectCompression(header[:n])
- compressedDigest := digest.Canonical.Digester()
- compressedCounter := ioutils.NewWriteCounter(compressedDigest.Hash())
- defragmented := io.TeeReader(io.MultiReader(bytes.NewBuffer(header[:n]), diff), compressedCounter)
+ defragmented := io.MultiReader(bytes.NewBuffer(header[:n]), diff)
+
+ // Decide if we need to compute digests
+ var compressedDigest, uncompressedDigest digest.Digest // = ""
+ var compressedDigester, uncompressedDigester digest.Digester // = nil
+ if layerOptions != nil && layerOptions.OriginalDigest != "" &&
+ layerOptions.OriginalDigest.Algorithm() == digest.Canonical {
+ compressedDigest = layerOptions.OriginalDigest
+ } else {
+ compressedDigester = digest.Canonical.Digester()
+ }
+ if layerOptions != nil && layerOptions.UncompressedDigest != "" &&
+ layerOptions.UncompressedDigest.Algorithm() == digest.Canonical {
+ uncompressedDigest = layerOptions.UncompressedDigest
+ } else {
+ uncompressedDigester = digest.Canonical.Digester()
+ }
+
+ var compressedWriter io.Writer
+ if compressedDigester != nil {
+ compressedWriter = compressedDigester.Hash()
+ } else {
+ compressedWriter = ioutil.Discard
+ }
+ compressedCounter := ioutils.NewWriteCounter(compressedWriter)
+ defragmented = io.TeeReader(defragmented, compressedCounter)
tsdata := bytes.Buffer{}
compressor, err := pgzip.NewWriterLevel(&tsdata, pgzip.BestSpeed)
@@ -1539,8 +1565,6 @@ func (r *layerStore) ApplyDiff(to string, diff io.Reader) (size int64, err error
return -1, err
}
defer uncompressed.Close()
- uncompressedDigest := digest.Canonical.Digester()
- uncompressedCounter := ioutils.NewWriteCounter(uncompressedDigest.Hash())
uidLog := make(map[uint32]struct{})
gidLog := make(map[uint32]struct{})
idLogger, err := tarlog.NewLogger(func(h *tar.Header) {
@@ -1553,7 +1577,12 @@ func (r *layerStore) ApplyDiff(to string, diff io.Reader) (size int64, err error
return -1, err
}
defer idLogger.Close()
- payload, err := asm.NewInputTarStream(io.TeeReader(uncompressed, io.MultiWriter(uncompressedCounter, idLogger)), metadata, storage.NewDiscardFilePutter())
+ uncompressedCounter := ioutils.NewWriteCounter(idLogger)
+ uncompressedWriter := (io.Writer)(uncompressedCounter)
+ if uncompressedDigester != nil {
+ uncompressedWriter = io.MultiWriter(uncompressedWriter, uncompressedDigester.Hash())
+ }
+ payload, err := asm.NewInputTarStream(io.TeeReader(uncompressed, uncompressedWriter), metadata, storage.NewDiscardFilePutter())
if err != nil {
return -1, err
}
@@ -1575,6 +1604,12 @@ func (r *layerStore) ApplyDiff(to string, diff io.Reader) (size int64, err error
return -1, err
}
}
+ if compressedDigester != nil {
+ compressedDigest = compressedDigester.Digest()
+ }
+ if uncompressedDigester != nil {
+ uncompressedDigest = uncompressedDigester.Digest()
+ }
updateDigestMap := func(m *map[digest.Digest][]string, oldvalue, newvalue digest.Digest, id string) {
var newList []string
@@ -1594,11 +1629,11 @@ func (r *layerStore) ApplyDiff(to string, diff io.Reader) (size int64, err error
(*m)[newvalue] = append((*m)[newvalue], id)
}
}
- updateDigestMap(&r.bycompressedsum, layer.CompressedDigest, compressedDigest.Digest(), layer.ID)
- layer.CompressedDigest = compressedDigest.Digest()
+ updateDigestMap(&r.bycompressedsum, layer.CompressedDigest, compressedDigest, layer.ID)
+ layer.CompressedDigest = compressedDigest
layer.CompressedSize = compressedCounter.Count
- updateDigestMap(&r.byuncompressedsum, layer.UncompressedDigest, uncompressedDigest.Digest(), layer.ID)
- layer.UncompressedDigest = uncompressedDigest.Digest()
+ updateDigestMap(&r.byuncompressedsum, layer.UncompressedDigest, uncompressedDigest, layer.ID)
+ layer.UncompressedDigest = uncompressedDigest
layer.UncompressedSize = uncompressedCounter.Count
layer.CompressionType = compression
layer.UIDs = make([]uint32, 0, len(uidLog))
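The recurring shape in this rework: trust a caller-supplied canonical digest and hash nothing, otherwise attach a digester; byte counting works the same either way because the WriteCounter wraps whichever sink is chosen. Reduced to a helper (a sketch under those assumptions, not the vendored code; assumes imports io, io/ioutil, and digest "github.com/opencontainers/go-digest"):

    // trustedOrComputed returns a way to obtain the final digest plus the
    // writer the stream must be teed through; ioutil.Discard keeps the
    // counter plumbing uniform when no hashing is needed.
    func trustedOrComputed(known digest.Digest) (final func() digest.Digest, hashSink io.Writer) {
        if known != "" && known.Algorithm() == digest.Canonical {
            return func() digest.Digest { return known }, ioutil.Discard
        }
        d := digest.Canonical.Digester()
        return d.Digest, d.Hash()
    }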
diff --git a/vendor/github.com/containers/storage/pkg/chunked/storage_linux.go b/vendor/github.com/containers/storage/pkg/chunked/storage_linux.go
index 0f14d8af9..3aea77f22 100644
--- a/vendor/github.com/containers/storage/pkg/chunked/storage_linux.go
+++ b/vendor/github.com/containers/storage/pkg/chunked/storage_linux.go
@@ -32,7 +32,7 @@ import (
const (
maxNumberMissingChunks = 1024
- newFileFlags = (unix.O_CREAT | unix.O_TRUNC | unix.O_WRONLY | unix.O_EXCL)
+ newFileFlags = (unix.O_CREAT | unix.O_TRUNC | unix.O_EXCL | unix.O_WRONLY)
containersOverrideXattr = "user.containers.override_stat"
bigDataKey = "zstd-chunked-manifest"
)
@@ -54,7 +54,8 @@ func timeToTimespec(time time.Time) (ts unix.Timespec) {
return unix.NsecToTimespec(time.UnixNano())
}
-func copyFileContent(src, destFile, root string, dirfd int, missingDirsMode, mode os.FileMode) (*os.File, int64, error) {
+func copyFileContent(srcFd int, destFile string, dirfd int, mode os.FileMode, useHardLinks bool) (*os.File, int64, error) {
+ src := fmt.Sprintf("/proc/self/fd/%d", srcFd)
st, err := os.Stat(src)
if err != nil {
return nil, -1, err
@@ -62,8 +63,32 @@ func copyFileContent(src, destFile, root string, dirfd int, missingDirsMode, mod
copyWithFileRange, copyWithFileClone := true, true
+ if useHardLinks {
+ destDirPath := filepath.Dir(destFile)
+ destBase := filepath.Base(destFile)
+ destDir, err := openFileUnderRoot(destDirPath, dirfd, 0, mode)
+ if err == nil {
+ defer destDir.Close()
+
+ doLink := func() error {
+ return unix.Linkat(srcFd, "", int(destDir.Fd()), destBase, unix.AT_EMPTY_PATH)
+ }
+
+ err := doLink()
+
+ // if the destination exists, unlink it first and try again
+ if err != nil && os.IsExist(err) {
+ unix.Unlinkat(int(destDir.Fd()), destBase, 0)
+ err = doLink()
+ }
+ if err == nil {
+ return nil, st.Size(), nil
+ }
+ }
+ }
+
// If the destination file already exists, we shouldn't blow it away
- dstFile, err := openFileUnderRoot(destFile, root, dirfd, newFileFlags, mode)
+ dstFile, err := openFileUnderRoot(destFile, dirfd, newFileFlags, mode)
if err != nil {
return nil, -1, err
}
@@ -148,7 +173,39 @@ func makeZstdChunkedDiffer(ctx context.Context, store storage.Store, blobSize in
}, nil
}
-func findFileInOtherLayers(file internal.ZstdFileMetadata, root string, dirfd int, layersMetadata map[string]map[string]*internal.ZstdFileMetadata, layersTarget map[string]string, missingDirsMode os.FileMode) (*os.File, int64, error) {
+// copyFileFromOtherLayer copies a file from another layer
+// file is the file to look for.
+// source is the path to the source layer checkout.
+// otherFile contains the metadata for the file.
+// dirfd is an open file descriptor to the destination root directory.
+// useHardLinks defines whether the deduplication can be performed using hard links.
+func copyFileFromOtherLayer(file internal.ZstdFileMetadata, source string, otherFile *internal.ZstdFileMetadata, dirfd int, useHardLinks bool) (bool, *os.File, int64, error) {
+ srcDirfd, err := unix.Open(source, unix.O_RDONLY, 0)
+ if err != nil {
+ return false, nil, 0, err
+ }
+ defer unix.Close(srcDirfd)
+
+ srcFile, err := openFileUnderRoot(otherFile.Name, srcDirfd, unix.O_RDONLY, 0)
+ if err != nil {
+ return false, nil, 0, err
+ }
+ defer srcFile.Close()
+
+ dstFile, written, err := copyFileContent(int(srcFile.Fd()), file.Name, dirfd, 0, useHardLinks)
+ if err != nil {
+ return false, nil, 0, err
+ }
+ return true, dstFile, written, err
+}
+
+// findFileInOtherLayers finds the specified file in other layers.
+// file is the file to look for.
+// dirfd is an open file descriptor to the checkout root directory.
+// layersMetadata contains the metadata for each layer in the storage.
+// layersTarget maps each layer to its checkout on disk.
+// useHardLinks defines whether the deduplication can be performed using hard links.
+func findFileInOtherLayers(file internal.ZstdFileMetadata, dirfd int, layersMetadata map[string]map[string]*internal.ZstdFileMetadata, layersTarget map[string]string, useHardLinks bool) (bool, *os.File, int64, error) {
// this is ugly, needs to be indexed
for layerID, checksums := range layersMetadata {
m, found := checksums[file.Digest]
@@ -161,27 +218,12 @@ func findFileInOtherLayers(file internal.ZstdFileMetadata, root string, dirfd in
continue
}
- srcDirfd, err := unix.Open(source, unix.O_RDONLY, 0)
- if err != nil {
- continue
- }
- defer unix.Close(srcDirfd)
-
- srcFile, err := openFileUnderRoot(m.Name, source, srcDirfd, unix.O_RDONLY, 0)
- if err != nil {
- continue
+ found, dstFile, written, err := copyFileFromOtherLayer(file, source, m, dirfd, useHardLinks)
+ if found && err == nil {
+ return found, dstFile, written, err
}
- defer srcFile.Close()
-
- srcPath := fmt.Sprintf("/proc/self/fd/%d", srcFile.Fd())
-
- dstFile, written, err := copyFileContent(srcPath, file.Name, root, dirfd, missingDirsMode, 0)
- if err != nil {
- continue
- }
- return dstFile, written, nil
}
- return nil, 0, nil
+ return false, nil, 0, nil
}
func getFileDigest(f *os.File) (digest.Digest, error) {
@@ -195,25 +237,28 @@ func getFileDigest(f *os.File) (digest.Digest, error) {
// findFileOnTheHost checks whether the requested file already exists on the host and copies the file content from there if possible.
// It is currently implemented to look only at the file with the same path. Ideally it can detect the same content also at different
// paths.
-func findFileOnTheHost(file internal.ZstdFileMetadata, root string, dirfd int, missingDirsMode os.FileMode) (*os.File, int64, error) {
+// file is the file to look for.
+// dirfd is an open fd to the destination checkout.
+// useHardLinks defines whether the deduplication can be performed using hard links.
+func findFileOnTheHost(file internal.ZstdFileMetadata, dirfd int, useHardLinks bool) (bool, *os.File, int64, error) {
sourceFile := filepath.Clean(filepath.Join("/", file.Name))
if !strings.HasPrefix(sourceFile, "/usr/") {
// limit host deduplication to files under /usr.
- return nil, 0, nil
+ return false, nil, 0, nil
}
st, err := os.Stat(sourceFile)
if err != nil || !st.Mode().IsRegular() {
- return nil, 0, nil
+ return false, nil, 0, nil
}
if st.Size() != file.Size {
- return nil, 0, nil
+ return false, nil, 0, nil
}
fd, err := unix.Open(sourceFile, unix.O_RDONLY|unix.O_NONBLOCK, 0)
if err != nil {
- return nil, 0, nil
+ return false, nil, 0, nil
}
f := os.NewFile(uintptr(fd), "fd")
@@ -221,35 +266,38 @@ func findFileOnTheHost(file internal.ZstdFileMetadata, root string, dirfd int, m
manifestChecksum, err := digest.Parse(file.Digest)
if err != nil {
- return nil, 0, err
+ return false, nil, 0, err
}
checksum, err := getFileDigest(f)
if err != nil {
- return nil, 0, err
+ return false, nil, 0, err
}
if checksum != manifestChecksum {
- return nil, 0, nil
+ return false, nil, 0, nil
}
- dstFile, written, err := copyFileContent(fmt.Sprintf("/proc/self/fd/%d", fd), file.Name, root, dirfd, missingDirsMode, 0)
+ dstFile, written, err := copyFileContent(fd, file.Name, dirfd, 0, useHardLinks)
if err != nil {
- return nil, 0, nil
+ return false, nil, 0, nil
}
// calculate the checksum again to make sure the file wasn't modified while it was copied
if _, err := f.Seek(0, 0); err != nil {
- return nil, 0, err
+ dstFile.Close()
+ return false, nil, 0, err
}
checksum, err = getFileDigest(f)
if err != nil {
- return nil, 0, err
+ dstFile.Close()
+ return false, nil, 0, err
}
if checksum != manifestChecksum {
- return nil, 0, nil
+ dstFile.Close()
+ return false, nil, 0, nil
}
- return dstFile, written, nil
+ return true, dstFile, written, nil
}
func maybeDoIDRemap(manifest []internal.ZstdFileMetadata, options *archive.TarOptions) error {
@@ -292,6 +340,7 @@ type missingChunk struct {
Files []missingFile
}
+// setFileAttrs sets the attributes of file according to the given metadata.
func setFileAttrs(file *os.File, mode os.FileMode, metadata *internal.ZstdFileMetadata, options *archive.TarOptions) error {
if file == nil || file.Fd() < 0 {
return errors.Errorf("invalid file")
@@ -333,7 +382,12 @@ func setFileAttrs(file *os.File, mode os.FileMode, metadata *internal.ZstdFileMe
return nil
}
-func openFileUnderRoot(name, root string, dirfd int, flags uint64, mode os.FileMode) (*os.File, error) {
+// openFileUnderRoot safely opens a file under the specified root directory using openat2
+// name is the path to open relative to dirfd.
+// dirfd is an open file descriptor to the target checkout directory.
+// flags are the flags to pass to the open syscall.
+// mode specifies the mode to use for newly created files.
+func openFileUnderRoot(name string, dirfd int, flags uint64, mode os.FileMode) (*os.File, error) {
how := unix.OpenHow{
Flags: flags,
Mode: uint64(mode & 07777),
@@ -347,8 +401,8 @@ func openFileUnderRoot(name, root string, dirfd int, flags uint64, mode os.FileM
return os.NewFile(uintptr(fd), name), nil
}
-func createFileFromZstdStream(dest string, dirfd int, reader io.Reader, missingDirsMode, mode os.FileMode, metadata *internal.ZstdFileMetadata, options *archive.TarOptions) (err error) {
- file, err := openFileUnderRoot(metadata.Name, dest, dirfd, newFileFlags, 0)
+func createFileFromZstdStream(dest string, dirfd int, reader io.Reader, mode os.FileMode, metadata *internal.ZstdFileMetadata, options *archive.TarOptions) (err error) {
+ file, err := openFileUnderRoot(metadata.Name, dirfd, newFileFlags, 0)
if err != nil {
return err
}
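openFileUnderRoot now needs only the directory fd because path containment is enforced by the kernel. A hedged sketch of the full call, assuming the elided part of the unix.OpenHow body sets RESOLVE_IN_ROOT (available since Linux 5.6) so that ".." components and absolute symlinks cannot escape dirfd:

    // Sketch (assumes golang.org/x/sys/unix and Linux >= 5.6 for openat2):
    how := unix.OpenHow{
        Flags:   flags,
        Mode:    uint64(mode & 07777),
        Resolve: unix.RESOLVE_IN_ROOT, // resolution may not escape dirfd
    }
    fd, err := unix.Openat2(dirfd, name, &how)
    if err != nil {
        return nil, err
    }
    return os.NewFile(uintptr(fd), name), nil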
@@ -381,7 +435,7 @@ func createFileFromZstdStream(dest string, dirfd int, reader io.Reader, missingD
return setFileAttrs(file, mode, metadata, options)
}
-func storeMissingFiles(streams chan io.ReadCloser, errs chan error, dest string, dirfd int, missingChunks []missingChunk, missingDirsMode os.FileMode, options *archive.TarOptions) error {
+func storeMissingFiles(streams chan io.ReadCloser, errs chan error, dest string, dirfd int, missingChunks []missingChunk, options *archive.TarOptions) error {
for mc := 0; ; mc++ {
var part io.ReadCloser
select {
@@ -412,7 +466,7 @@ func storeMissingFiles(streams chan io.ReadCloser, errs chan error, dest string,
limitReader := io.LimitReader(part, mf.Length())
- if err := createFileFromZstdStream(dest, dirfd, limitReader, missingDirsMode, os.FileMode(mf.File.Mode), mf.File, options); err != nil {
+ if err := createFileFromZstdStream(dest, dirfd, limitReader, os.FileMode(mf.File.Mode), mf.File, options); err != nil {
part.Close()
return err
}
@@ -462,7 +516,7 @@ func mergeMissingChunks(missingChunks []missingChunk, target int) []missingChunk
return newMissingChunks
}
-func retrieveMissingFiles(input *chunkedZstdDiffer, dest string, dirfd int, missingChunks []missingChunk, missingDirsMode os.FileMode, options *archive.TarOptions) error {
+func retrieveMissingFiles(input *chunkedZstdDiffer, dest string, dirfd int, missingChunks []missingChunk, options *archive.TarOptions) error {
var chunksToRequest []ImageSourceChunk
for _, c := range missingChunks {
chunksToRequest = append(chunksToRequest, c.RawChunk)
@@ -492,19 +546,19 @@ func retrieveMissingFiles(input *chunkedZstdDiffer, dest string, dirfd int, miss
return err
}
- if err := storeMissingFiles(streams, errs, dest, dirfd, missingChunks, missingDirsMode, options); err != nil {
+ if err := storeMissingFiles(streams, errs, dest, dirfd, missingChunks, options); err != nil {
return err
}
return nil
}
-func safeMkdir(target string, dirfd int, mode os.FileMode, metadata *internal.ZstdFileMetadata, options *archive.TarOptions) error {
+func safeMkdir(dirfd int, mode os.FileMode, metadata *internal.ZstdFileMetadata, options *archive.TarOptions) error {
parent := filepath.Dir(metadata.Name)
base := filepath.Base(metadata.Name)
parentFd := dirfd
if parent != "." {
- parentFile, err := openFileUnderRoot(parent, target, dirfd, unix.O_DIRECTORY|unix.O_PATH|unix.O_RDONLY, 0)
+ parentFile, err := openFileUnderRoot(parent, dirfd, unix.O_DIRECTORY|unix.O_PATH|unix.O_RDONLY, 0)
if err != nil {
return err
}
@@ -518,7 +572,7 @@ func safeMkdir(target string, dirfd int, mode os.FileMode, metadata *internal.Zs
}
}
- file, err := openFileUnderRoot(metadata.Name, target, dirfd, unix.O_RDONLY, 0)
+ file, err := openFileUnderRoot(metadata.Name, dirfd, unix.O_RDONLY, 0)
if err != nil {
return err
}
@@ -527,8 +581,8 @@ func safeMkdir(target string, dirfd int, mode os.FileMode, metadata *internal.Zs
return setFileAttrs(file, mode, metadata, options)
}
-func safeLink(target string, dirfd int, mode os.FileMode, metadata *internal.ZstdFileMetadata, options *archive.TarOptions) error {
- sourceFile, err := openFileUnderRoot(metadata.Linkname, target, dirfd, unix.O_RDONLY, 0)
+func safeLink(dirfd int, mode os.FileMode, metadata *internal.ZstdFileMetadata, options *archive.TarOptions) error {
+ sourceFile, err := openFileUnderRoot(metadata.Linkname, dirfd, unix.O_RDONLY, 0)
if err != nil {
return err
}
@@ -537,7 +591,7 @@ func safeLink(target string, dirfd int, mode os.FileMode, metadata *internal.Zst
destDir, destBase := filepath.Dir(metadata.Name), filepath.Base(metadata.Name)
destDirFd := dirfd
if destDir != "." {
- f, err := openFileUnderRoot(destDir, target, dirfd, unix.O_RDONLY, 0)
+ f, err := openFileUnderRoot(destDir, dirfd, unix.O_RDONLY, 0)
if err != nil {
return err
}
@@ -550,7 +604,7 @@ func safeLink(target string, dirfd int, mode os.FileMode, metadata *internal.Zst
return err
}
- newFile, err := openFileUnderRoot(metadata.Name, target, dirfd, unix.O_WRONLY, 0)
+ newFile, err := openFileUnderRoot(metadata.Name, dirfd, unix.O_WRONLY, 0)
if err != nil {
return err
}
@@ -559,11 +613,11 @@ func safeLink(target string, dirfd int, mode os.FileMode, metadata *internal.Zst
return setFileAttrs(newFile, mode, metadata, options)
}
-func safeSymlink(target string, dirfd int, mode os.FileMode, metadata *internal.ZstdFileMetadata, options *archive.TarOptions) error {
+func safeSymlink(dirfd int, mode os.FileMode, metadata *internal.ZstdFileMetadata, options *archive.TarOptions) error {
destDir, destBase := filepath.Dir(metadata.Name), filepath.Base(metadata.Name)
destDirFd := dirfd
if destDir != "." {
- f, err := openFileUnderRoot(destDir, target, dirfd, unix.O_RDONLY, 0)
+ f, err := openFileUnderRoot(destDir, dirfd, unix.O_RDONLY, 0)
if err != nil {
return err
}
@@ -580,7 +634,7 @@ type whiteoutHandler struct {
}
func (d whiteoutHandler) Setxattr(path, name string, value []byte) error {
- file, err := openFileUnderRoot(path, d.Root, d.Dirfd, unix.O_RDONLY, 0)
+ file, err := openFileUnderRoot(path, d.Dirfd, unix.O_RDONLY, 0)
if err != nil {
return err
}
@@ -595,7 +649,7 @@ func (d whiteoutHandler) Mknod(path string, mode uint32, dev int) error {
dirfd := d.Dirfd
if dir != "" {
- dir, err := openFileUnderRoot(dir, d.Root, d.Dirfd, unix.O_RDONLY, 0)
+ dir, err := openFileUnderRoot(dir, d.Dirfd, unix.O_RDONLY, 0)
if err != nil {
return err
}
@@ -615,7 +669,7 @@ func checkChownErr(err error, name string, uid, gid int) error {
}
func (d whiteoutHandler) Chown(path string, uid, gid int) error {
- file, err := openFileUnderRoot(path, d.Root, d.Dirfd, unix.O_PATH, 0)
+ file, err := openFileUnderRoot(path, d.Dirfd, unix.O_PATH, 0)
if err != nil {
return err
}
@@ -640,6 +694,13 @@ type hardLinkToCreate struct {
metadata *internal.ZstdFileMetadata
}
+func parseBooleanPullOption(storeOpts *storage.StoreOptions, name string, def bool) bool {
+ if value, ok := storeOpts.PullOptions[name]; ok {
+ return strings.ToLower(value) == "true"
+ }
+ return def
+}
+
func (d *chunkedZstdDiffer) ApplyDiff(dest string, options *archive.TarOptions) (graphdriver.DriverWithDifferOutput, error) {
bigData := map[string][]byte{
bigDataKey: d.manifest,
@@ -654,11 +715,16 @@ func (d *chunkedZstdDiffer) ApplyDiff(dest string, options *archive.TarOptions)
return output, err
}
- enableHostDedup := false
- if value := storeOpts.PullOptions["enable_host_deduplication"]; strings.ToLower(value) == "true" {
- enableHostDedup = true
+ if !parseBooleanPullOption(&storeOpts, "enable_partial_images", false) {
+ return output, errors.New("enable_partial_images not configured")
}
+ enableHostDedup := parseBooleanPullOption(&storeOpts, "enable_host_deduplication", false)
+
+ // When hard link deduplication is used, file attributes are ignored because setting them
+ // modifies the source file as well.
+ useHardLinks := parseBooleanPullOption(&storeOpts, "use_hard_links", false)
+
// Generate the manifest
var toc internal.ZstdTOC
if err := json.Unmarshal(d.manifest, &toc); err != nil {
@@ -704,11 +770,6 @@ func (d *chunkedZstdDiffer) ApplyDiff(dest string, options *archive.TarOptions)
otherLayersCache := prepareOtherLayersCache(d.layersMetadata)
- missingDirsMode := os.FileMode(0700)
- if options.ForceMask != nil {
- missingDirsMode = *options.ForceMask
- }
-
// hard links can point to missing files, so create them only after all files
// have been retrieved
var hardLinks []hardLinkToCreate
@@ -758,7 +819,7 @@ func (d *chunkedZstdDiffer) ApplyDiff(dest string, options *archive.TarOptions)
if r.Size == 0 {
// Used to have a scope for cleanup.
createEmptyFile := func() error {
- file, err := openFileUnderRoot(r.Name, dest, dirfd, newFileFlags, 0)
+ file, err := openFileUnderRoot(r.Name, dirfd, newFileFlags, 0)
if err != nil {
return err
}
@@ -775,7 +836,7 @@ func (d *chunkedZstdDiffer) ApplyDiff(dest string, options *archive.TarOptions)
}
case tar.TypeDir:
- if err := safeMkdir(dest, dirfd, mode, &r, options); err != nil {
+ if err := safeMkdir(dirfd, mode, &r, options); err != nil {
return output, err
}
continue
@@ -794,7 +855,7 @@ func (d *chunkedZstdDiffer) ApplyDiff(dest string, options *archive.TarOptions)
continue
case tar.TypeSymlink:
- if err := safeSymlink(dest, dirfd, mode, &r, options); err != nil {
+ if err := safeSymlink(dirfd, mode, &r, options); err != nil {
return output, err
}
continue
@@ -809,7 +870,7 @@ func (d *chunkedZstdDiffer) ApplyDiff(dest string, options *archive.TarOptions)
totalChunksSize += r.Size
- dstFile, _, err := findFileInOtherLayers(r, dest, dirfd, otherLayersCache, d.layersTarget, missingDirsMode)
+ found, dstFile, _, err := findFileInOtherLayers(r, dirfd, otherLayersCache, d.layersTarget, useHardLinks)
if err != nil {
return output, err
}
@@ -819,11 +880,13 @@ func (d *chunkedZstdDiffer) ApplyDiff(dest string, options *archive.TarOptions)
return output, err
}
dstFile.Close()
+ }
+ if found {
continue
}
if enableHostDedup {
- dstFile, _, err = findFileOnTheHost(r, dest, dirfd, missingDirsMode)
+ found, dstFile, _, err = findFileOnTheHost(r, dirfd, useHardLinks)
if err != nil {
return output, err
}
@@ -833,6 +896,8 @@ func (d *chunkedZstdDiffer) ApplyDiff(dest string, options *archive.TarOptions)
return output, err
}
dstFile.Close()
+ }
+ if found {
continue
}
}
@@ -857,13 +922,13 @@ func (d *chunkedZstdDiffer) ApplyDiff(dest string, options *archive.TarOptions)
// There are some missing files. Prepare a multirange request for the missing chunks.
if len(missingChunks) > 0 {
missingChunks = mergeMissingChunks(missingChunks, maxNumberMissingChunks)
- if err := retrieveMissingFiles(d, dest, dirfd, missingChunks, missingDirsMode, options); err != nil {
+ if err := retrieveMissingFiles(d, dest, dirfd, missingChunks, options); err != nil {
return output, err
}
}
for _, m := range hardLinks {
- if err := safeLink(m.dest, m.dirfd, m.mode, m.metadata, options); err != nil {
+ if err := safeLink(m.dirfd, m.mode, m.metadata, options); err != nil {
return output, err
}
}
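parseBooleanPullOption reads these switches from the store's PullOptions map, which containers/storage populates from the pull_options table in storage.conf, and partial-image pulls are now opt-in. A sketch of the options ApplyDiff consults, expressed as a literal StoreOptions (keys are the ones referenced above):

    storeOpts := storage.StoreOptions{
        PullOptions: map[string]string{
            "enable_partial_images":     "true",  // required, or ApplyDiff errors out
            "enable_host_deduplication": "false",
            "use_hard_links":            "false", // skips file attrs, see comment above
        },
    }
    useHardLinks := parseBooleanPullOption(&storeOpts, "use_hard_links", false) // -> false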
diff --git a/vendor/github.com/containers/storage/pkg/ioutils/readers.go b/vendor/github.com/containers/storage/pkg/ioutils/readers.go
index 63f3c07f4..0e89787d4 100644
--- a/vendor/github.com/containers/storage/pkg/ioutils/readers.go
+++ b/vendor/github.com/containers/storage/pkg/ioutils/readers.go
@@ -17,8 +17,25 @@ func (r *readCloserWrapper) Close() error {
return r.closer()
}
+type readWriteToCloserWrapper struct {
+ io.Reader
+ io.WriterTo
+ closer func() error
+}
+
+func (r *readWriteToCloserWrapper) Close() error {
+ return r.closer()
+}
+
// NewReadCloserWrapper returns a new io.ReadCloser.
func NewReadCloserWrapper(r io.Reader, closer func() error) io.ReadCloser {
+ if wt, ok := r.(io.WriterTo); ok {
+ return &readWriteToCloserWrapper{
+ Reader: r,
+ WriterTo: wt,
+ closer: closer,
+ }
+ }
return &readCloserWrapper{
Reader: r,
closer: closer,
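The extra wrapper exists because io.Copy type-asserts its source to io.WriterTo and, when present, hands the destination straight to WriteTo instead of looping over an intermediate buffer; a wrapper that hides the interface silently disables that fast path. A small illustration (payload is assumed in scope; assumes import "bytes"):

    var r io.Reader = bytes.NewReader(payload) // *bytes.Reader implements io.WriterTo
    rc := NewReadCloserWrapper(r, func() error { return nil })
    if _, ok := rc.(io.WriterTo); ok {
        // true with the new wrapper, false with the old one: io.Copy(dst, rc)
        // now calls rc.WriteTo(dst) directly, skipping the intermediate buffer
    }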
diff --git a/vendor/github.com/containers/storage/store.go b/vendor/github.com/containers/storage/store.go
index dc6eaafa2..5e16b9e37 100644
--- a/vendor/github.com/containers/storage/store.go
+++ b/vendor/github.com/containers/storage/store.go
@@ -547,6 +547,15 @@ type LayerOptions struct {
// initialize this layer. If set, it should be a child of the layer
// which we want to use as the parent of the new layer.
TemplateLayer string
+ // OriginalDigest specifies a digest of the tarstream (diff), if one is
+ // provided along with these LayerOptions, and reliably known by the caller.
+ // Use the default "" if this field is not applicable or the value is not known.
+ OriginalDigest digest.Digest
+ // UncompressedDigest specifies a digest of the uncompressed version (“DiffID”)
+ // of the tarstream (diff), if one is provided along with these LayerOptions,
+ // and reliably known by the caller.
+ // Use the default "" if this field is not applicable or the value is not known.
+ UncompressedDigest digest.Digest
}
// ImageOptions is used for passing options to a Store's CreateImage() method.
@@ -1031,20 +1040,21 @@ func (s *store) PutLayer(id, parent string, names []string, mountLabel string, w
gidMap = s.gidMap
}
}
- var layerOptions *LayerOptions
+ layerOptions := LayerOptions{
+ OriginalDigest: options.OriginalDigest,
+ UncompressedDigest: options.UncompressedDigest,
+ }
if s.canUseShifting(uidMap, gidMap) {
- layerOptions = &LayerOptions{IDMappingOptions: types.IDMappingOptions{HostUIDMapping: true, HostGIDMapping: true, UIDMap: nil, GIDMap: nil}}
+ layerOptions.IDMappingOptions = types.IDMappingOptions{HostUIDMapping: true, HostGIDMapping: true, UIDMap: nil, GIDMap: nil}
} else {
- layerOptions = &LayerOptions{
- IDMappingOptions: types.IDMappingOptions{
- HostUIDMapping: options.HostUIDMapping,
- HostGIDMapping: options.HostGIDMapping,
- UIDMap: copyIDMap(uidMap),
- GIDMap: copyIDMap(gidMap),
- },
+ layerOptions.IDMappingOptions = types.IDMappingOptions{
+ HostUIDMapping: options.HostUIDMapping,
+ HostGIDMapping: options.HostGIDMapping,
+ UIDMap: copyIDMap(uidMap),
+ GIDMap: copyIDMap(gidMap),
}
}
- return rlstore.Put(id, parentLayer, names, mountLabel, nil, layerOptions, writeable, nil, diff)
+ return rlstore.Put(id, parentLayer, names, mountLabel, nil, &layerOptions, writeable, nil, diff)
}
func (s *store) CreateLayer(id, parent string, names []string, mountLabel string, writeable bool, options *LayerOptions) (*Layer, error) {
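With the two digest fields plumbed through, a caller that has already verified the stream (as storageImageDestination.commitLayer above now does) can let applyDiffWithOptions skip rehashing. A hedged usage sketch, assuming blobDigest and diffID were verified against diffReader by the caller:

    layer, _, err := store.PutLayer(id, parentID, nil, "", false, &storage.LayerOptions{
        OriginalDigest:     blobDigest, // digest of the tarstream as transferred
        UncompressedDigest: diffID,     // digest of the uncompressed diff ("DiffID")
    }, diffReader)
    if err != nil {
        return err
    }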
diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/v1/annotations.go b/vendor/github.com/opencontainers/image-spec/specs-go/v1/annotations.go
index 35d810895..581cf7cdf 100644
--- a/vendor/github.com/opencontainers/image-spec/specs-go/v1/annotations.go
+++ b/vendor/github.com/opencontainers/image-spec/specs-go/v1/annotations.go
@@ -53,4 +53,10 @@ const (
// AnnotationDescription is the annotation key for the human-readable description of the software packaged in the image.
AnnotationDescription = "org.opencontainers.image.description"
+
+ // AnnotationBaseImageDigest is the annotation key for the digest of the image's base image.
+ AnnotationBaseImageDigest = "org.opencontainers.image.base.digest"
+
+ // AnnotationBaseImageName is the annotation key for the image reference of the image's base image.
+ AnnotationBaseImageName = "org.opencontainers.image.base.name"
)
diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/v1/config.go b/vendor/github.com/opencontainers/image-spec/specs-go/v1/config.go
index fe799bd69..ffff4b6d1 100644
--- a/vendor/github.com/opencontainers/image-spec/specs-go/v1/config.go
+++ b/vendor/github.com/opencontainers/image-spec/specs-go/v1/config.go
@@ -89,9 +89,20 @@ type Image struct {
// Architecture is the CPU architecture which the binaries in this image are built to run on.
Architecture string `json:"architecture"`
+ // Variant is the variant of the specified CPU architecture which image binaries are intended to run on.
+ Variant string `json:"variant,omitempty"`
+
// OS is the name of the operating system which the image is built to run on.
OS string `json:"os"`
+ // OSVersion is an optional field specifying the operating system
+ // version, for example on Windows `10.0.14393.1066`.
+ OSVersion string `json:"os.version,omitempty"`
+
+ // OSFeatures is an optional field specifying an array of strings,
+ // each listing a required OS feature (for example on Windows `win32k`).
+ OSFeatures []string `json:"os.features,omitempty"`
+
// Config defines the execution parameters which should be used as a base when running a container using the image.
Config ImageConfig `json:"config,omitempty"`
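The three added fields bring the Go struct in line with the image-spec config schema, which already defines variant, os.version and os.features. A sketch using all three, with illustrative values only:

package example

import v1 "github.com/opencontainers/image-spec/specs-go/v1"

// img exercises the newly mapped platform fields; the concrete values are
// illustrative, not taken from any real image.
var img = v1.Image{
	Architecture: "arm64",
	Variant:      "v8",
	OS:           "windows",
	OSVersion:    "10.0.14393.1066",
	OSFeatures:   []string{"win32k"},
}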
diff --git a/vendor/github.com/vbatts/tar-split/tar/storage/getter.go b/vendor/github.com/vbatts/tar-split/tar/storage/getter.go
index ae11f8ffd..9fed24aa8 100644
--- a/vendor/github.com/vbatts/tar-split/tar/storage/getter.go
+++ b/vendor/github.com/vbatts/tar-split/tar/storage/getter.go
@@ -92,11 +92,12 @@ func NewDiscardFilePutter() FilePutter {
}
type bitBucketFilePutter struct {
+ buffer [32 * 1024]byte // 32 KiB is the buffer size io.Copy uses internally, as of August 2021.
}
func (bbfp *bitBucketFilePutter) Put(name string, r io.Reader) (int64, []byte, error) {
c := crc64.New(CRCTable)
- i, err := io.Copy(c, r)
+ i, err := io.CopyBuffer(c, r, bbfp.buffer[:])
return i, c.Sum(nil), err
}
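The embedded array replaces the 32 KiB buffer that io.Copy would otherwise allocate on every Put call with one buffer that lives as long as the putter. The same pattern in isolation (crc64.ECMA stands in for the package's CRCTable):

package main

import (
	"fmt"
	"hash/crc64"
	"io"
	"strings"
)

// discarder hashes and discards its input, reusing one 32 KiB buffer via
// io.CopyBuffer instead of letting io.Copy allocate a fresh one per call.
type discarder struct {
	buf [32 * 1024]byte
}

func (d *discarder) put(r io.Reader) (int64, []byte, error) {
	c := crc64.New(crc64.MakeTable(crc64.ECMA))
	n, err := io.CopyBuffer(c, r, d.buf[:])
	return n, c.Sum(nil), err
}

func main() {
	d := &discarder{}
	n, sum, _ := d.put(strings.NewReader("some file contents"))
	fmt.Printf("%d bytes, crc64=%x\n", n, sum)
}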
diff --git a/vendor/github.com/vbauerster/mpb/v7/README.md b/vendor/github.com/vbauerster/mpb/v7/README.md
index d0560d799..90d4fe639 100644
--- a/vendor/github.com/vbauerster/mpb/v7/README.md
+++ b/vendor/github.com/vbauerster/mpb/v7/README.md
@@ -84,7 +84,7 @@ func main() {
// replace ETA decorator with "done" message, OnComplete event
decor.OnComplete(
// ETA decorator with ewma age of 60
- decor.EwmaETA(decor.ET_STYLE_GO, 60), "done",
+ decor.EwmaETA(decor.ET_STYLE_GO, 60, decor.WCSyncWidth), "done",
),
),
)
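The added decor.WCSyncWidth is passed through EwmaETA's variadic WC parameter and makes the ETA column participate in width synchronization across bars. A trimmed sketch of the same decorator stack, wrapped in a helper for clarity:

package example

import (
	"github.com/vbauerster/mpb/v7"
	"github.com/vbauerster/mpb/v7/decor"
)

// addETABar attaches a width-synced EwmaETA decorator that is replaced by
// "done" once the bar completes, as in the README excerpt above.
func addETABar(p *mpb.Progress, total int64) *mpb.Bar {
	return p.AddBar(total,
		mpb.AppendDecorators(
			decor.OnComplete(decor.EwmaETA(decor.ET_STYLE_GO, 60, decor.WCSyncWidth), "done"),
		),
	)
}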
diff --git a/vendor/github.com/vbauerster/mpb/v7/bar.go b/vendor/github.com/vbauerster/mpb/v7/bar.go
index ed6c73eda..ca191cf39 100644
--- a/vendor/github.com/vbauerster/mpb/v7/bar.go
+++ b/vendor/github.com/vbauerster/mpb/v7/bar.go
@@ -20,21 +20,18 @@ type Bar struct {
priority int // used by heap
index int // used by heap
- extendedLines int
toShutdown bool
toDrop bool
noPop bool
hasEwmaDecorators bool
operateState chan func(*bState)
- frameCh chan io.Reader
- syncTableCh chan [][]chan int
- completed chan bool
+ frameCh chan *frame
// cancel is called either by user or on complete event
cancel func()
// done is closed after cacheState is assigned
done chan struct{}
- // cacheState is populated, right after close(shutdown)
+ // cacheState is populated, right after close(b.done)
cacheState *bState
container *Progress
@@ -77,6 +74,11 @@ type bState struct {
debugOut io.Writer
}
+type frame struct {
+ reader io.Reader
+ lines int
+}
+
func newBar(container *Progress, bs *bState) *Bar {
logPrefix := fmt.Sprintf("%sbar#%02d ", container.dlogger.Prefix(), bs.id)
ctx, cancel := context.WithCancel(container.ctx)
@@ -87,9 +89,7 @@ func newBar(container *Progress, bs *bState) *Bar {
toDrop: bs.dropOnComplete,
noPop: bs.noPop,
operateState: make(chan func(*bState)),
- frameCh: make(chan io.Reader, 1),
- syncTableCh: make(chan [][]chan int, 1),
- completed: make(chan bool, 1),
+ frameCh: make(chan *frame, 1),
done: make(chan struct{}),
cancel: cancel,
dlogger: log.New(bs.debugOut, logPrefix, log.Lshortfile),
@@ -145,6 +145,7 @@ func (b *Bar) SetRefill(amount int64) {
// TraverseDecorators traverses all available decorators and calls cb func on each.
func (b *Bar) TraverseDecorators(cb func(decor.Decorator)) {
+ done := make(chan struct{})
select {
case b.operateState <- func(s *bState) {
for _, decorators := range [...][]decor.Decorator{
@@ -155,7 +156,9 @@ func (b *Bar) TraverseDecorators(cb func(decor.Decorator)) {
cb(extractBaseDecorator(d))
}
}
+ close(done)
}:
+ <-done
case <-b.done:
}
}
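Without the done channel, TraverseDecorators could return while its closure was still queued on operateState; closing done inside the closure makes the call block until the callback has actually run. The pattern reduced to its core:

package main

import "fmt"

func main() {
	ops := make(chan func())
	go func() { // owning goroutine: executes every operation it receives
		for op := range ops {
			op()
		}
	}()

	done := make(chan struct{})
	ops <- func() {
		fmt.Println("runs on the owning goroutine")
		close(done) // signal completion back to the sender
	}
	<-done // sender blocks until the closure has actually run
}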
@@ -174,7 +177,7 @@ func (b *Bar) SetTotal(total int64, triggerComplete bool) {
if s.triggerComplete && !s.completed {
s.current = s.total
s.completed = true
- go b.refreshTillShutdown()
+ go b.forceRefreshIfLastUncompleted()
}
}:
case <-b.done:
@@ -192,7 +195,7 @@ func (b *Bar) SetCurrent(current int64) {
if s.triggerComplete && s.current >= s.total {
s.current = s.total
s.completed = true
- go b.refreshTillShutdown()
+ go b.forceRefreshIfLastUncompleted()
}
}:
case <-b.done:
@@ -219,7 +222,7 @@ func (b *Bar) IncrInt64(n int64) {
if s.triggerComplete && s.current >= s.total {
s.current = s.total
s.completed = true
- go b.refreshTillShutdown()
+ go b.forceRefreshIfLastUncompleted()
}
}:
case <-b.done:
@@ -258,32 +261,49 @@ func (b *Bar) DecoratorAverageAdjust(start time.Time) {
// priority, i.e. the bar will be on top. If you don't need to set priority
// dynamically, prefer the BarPriority option.
func (b *Bar) SetPriority(priority int) {
- select {
- case <-b.done:
- default:
- b.container.setBarPriority(b, priority)
- }
+ b.container.UpdateBarPriority(b, priority)
}
-// Abort interrupts bar's running goroutine. Call this, if you'd like
-// to stop/remove bar before completion event. It has no effect after
-// completion event. If drop is true bar will be removed as well.
+// Abort interrupts the bar's running goroutine. Abort has no effect if
+// the bar is already in the completed state. If drop is true, the bar
+// will be removed as well.
func (b *Bar) Abort(drop bool) {
select {
- case <-b.done:
- default:
+ case b.operateState <- func(s *bState) {
+ if s.completed {
+ return
+ }
if drop {
b.container.dropBar(b)
+ b.cancel()
+ return
}
- b.cancel()
+ go func() {
+ var uncompleted int
+ b.container.traverseBars(func(bar *Bar) bool {
+ if b != bar && !bar.Completed() {
+ uncompleted++
+ return false
+ }
+ return true
+ })
+ if uncompleted == 0 {
+ b.container.refreshCh <- time.Now()
+ }
+ b.cancel()
+ }()
+ }:
+ <-b.done
+ case <-b.done:
}
}
// Completed reports whether the bar is in completed state.
func (b *Bar) Completed() bool {
+ result := make(chan bool)
select {
- case b.operateState <- func(s *bState) { b.completed <- s.completed }:
- return <-b.completed
+ case b.operateState <- func(s *bState) { result <- s.completed }:
+ return <-result
case <-b.done:
return true
}
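Completed() (like wSyncTable() further down) now allocates a fresh unbuffered result channel per call instead of replying on the shared, buffered b.completed channel, so concurrent callers can never consume each other's answers. Schematically, with state standing in for mpb's bState:

package example

type state struct{ completed bool }

// completed queries the serving goroutine through ops; the per-call result
// channel guarantees the reply goes to this caller and no other.
func completed(ops chan<- func(*state), done <-chan struct{}) bool {
	result := make(chan bool)
	select {
	case ops <- func(s *state) { result <- s.completed }:
		return <-result
	case <-done: // serving goroutine has shut down; the bar is final
		return true
	}
}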
@@ -296,12 +316,12 @@ func (b *Bar) serve(ctx context.Context, s *bState) {
case op := <-b.operateState:
op(s)
case <-ctx.Done():
- b.cacheState = s
- close(b.done)
// Notifying decorators about shutdown event
for _, sl := range s.shutdownListeners {
sl.Shutdown()
}
+ b.cacheState = s
+ close(b.done)
return
}
}
@@ -319,17 +339,15 @@ func (b *Bar) render(tw int) {
b.toShutdown = !b.toShutdown
b.recoveredPanic = p
}
- frame, lines := s.extender(nil, s.reqWidth, stat)
- b.extendedLines = lines
- b.frameCh <- frame
+ reader, lines := s.extender(nil, s.reqWidth, stat)
+ b.frameCh <- &frame{reader, lines + 1}
b.dlogger.Println(p)
}
s.completeFlushed = s.completed
}()
- frame, lines := s.extender(s.draw(stat), s.reqWidth, stat)
- b.extendedLines = lines
+ reader, lines := s.extender(s.draw(stat), s.reqWidth, stat)
b.toShutdown = s.completed && !s.completeFlushed
- b.frameCh <- frame
+ b.frameCh <- &frame{reader, lines + 1}
}:
case <-b.done:
s := b.cacheState
@@ -338,9 +356,8 @@ func (b *Bar) render(tw int) {
if b.recoveredPanic == nil {
r = s.draw(stat)
}
- frame, lines := s.extender(r, s.reqWidth, stat)
- b.extendedLines = lines
- b.frameCh <- frame
+ reader, lines := s.extender(r, s.reqWidth, stat)
+ b.frameCh <- &frame{reader, lines + 1}
}
}
@@ -359,31 +376,42 @@ func (b *Bar) subscribeDecorators() {
shutdownListeners = append(shutdownListeners, d)
}
})
+ b.hasEwmaDecorators = len(ewmaDecorators) != 0
select {
case b.operateState <- func(s *bState) {
s.averageDecorators = averageDecorators
s.ewmaDecorators = ewmaDecorators
s.shutdownListeners = shutdownListeners
}:
- b.hasEwmaDecorators = len(ewmaDecorators) != 0
case <-b.done:
}
}
-func (b *Bar) refreshTillShutdown() {
- for {
- select {
- case b.container.refreshCh <- time.Now():
- case <-b.done:
- return
+func (b *Bar) forceRefreshIfLastUncompleted() {
+ var uncompleted int
+ b.container.traverseBars(func(bar *Bar) bool {
+ if b != bar && !bar.Completed() {
+ uncompleted++
+ return false
+ }
+ return true
+ })
+ if uncompleted == 0 {
+ for {
+ select {
+ case b.container.refreshCh <- time.Now():
+ case <-b.done:
+ return
+ }
}
}
}
func (b *Bar) wSyncTable() [][]chan int {
+ result := make(chan [][]chan int)
select {
- case b.operateState <- func(s *bState) { b.syncTableCh <- s.wSyncTable() }:
- return <-b.syncTableCh
+ case b.operateState <- func(s *bState) { result <- s.wSyncTable() }:
+ return <-result
case <-b.done:
return b.cacheState.wSyncTable()
}
diff --git a/vendor/github.com/vbauerster/mpb/v7/bar_filler_bar.go b/vendor/github.com/vbauerster/mpb/v7/bar_filler_bar.go
index e30d4921c..80b210455 100644
--- a/vendor/github.com/vbauerster/mpb/v7/bar_filler_bar.go
+++ b/vendor/github.com/vbauerster/mpb/v7/bar_filler_bar.go
@@ -26,15 +26,17 @@ type BarStyleComposer interface {
Filler(string) BarStyleComposer
Refiller(string) BarStyleComposer
Padding(string) BarStyleComposer
- Tip(...string) BarStyleComposer
+ TipOnComplete(string) BarStyleComposer
+ Tip(frames ...string) BarStyleComposer
Reverse() BarStyleComposer
}
type bFiller struct {
components [components]*component
tip struct {
- count uint
- frames []*component
+ count uint
+ onComplete *component
+ frames []*component
}
flush func(dst io.Writer, filling, padding [][]byte)
}
@@ -45,25 +47,26 @@ type component struct {
}
type barStyle struct {
- lbound string
- rbound string
- filler string
- refiller string
- padding string
- tip []string
- rev bool
+ lbound string
+ rbound string
+ filler string
+ refiller string
+ padding string
+ tipOnComplete string
+ tipFrames []string
+ rev bool
}
// BarStyle constructs default bar style which can be altered via
// BarStyleComposer interface.
func BarStyle() BarStyleComposer {
return &barStyle{
- lbound: "[",
- rbound: "]",
- filler: "=",
- refiller: "+",
- padding: "-",
- tip: []string{">"},
+ lbound: "[",
+ rbound: "]",
+ filler: "=",
+ refiller: "+",
+ padding: "-",
+ tipFrames: []string{">"},
}
}
@@ -92,9 +95,14 @@ func (s *barStyle) Padding(padding string) BarStyleComposer {
return s
}
-func (s *barStyle) Tip(tip ...string) BarStyleComposer {
- if len(tip) != 0 {
- s.tip = append(s.tip[:0], tip...)
+func (s *barStyle) TipOnComplete(tip string) BarStyleComposer {
+ s.tipOnComplete = tip
+ return s
+}
+
+func (s *barStyle) Tip(frames ...string) BarStyleComposer {
+ if len(frames) != 0 {
+ s.tipFrames = append(s.tipFrames[:0], frames...)
}
return s
}
@@ -133,8 +141,12 @@ func (s *barStyle) Build() BarFiller {
width: runewidth.StringWidth(stripansi.Strip(s.padding)),
bytes: []byte(s.padding),
}
- bf.tip.frames = make([]*component, len(s.tip))
- for i, t := range s.tip {
+ bf.tip.onComplete = &component{
+ width: runewidth.StringWidth(stripansi.Strip(s.tipOnComplete)),
+ bytes: []byte(s.tipOnComplete),
+ }
+ bf.tip.frames = make([]*component, len(s.tipFrames))
+ for i, t := range s.tipFrames {
bf.tip.frames[i] = &component{
width: runewidth.StringWidth(stripansi.Strip(t)),
bytes: []byte(t),
@@ -146,64 +158,82 @@ func (s *barStyle) Build() BarFiller {
func (s *bFiller) Fill(w io.Writer, width int, stat decor.Statistics) {
width = internal.CheckRequestedWidth(width, stat.AvailableWidth)
brackets := s.components[iLbound].width + s.components[iRbound].width
- if width < brackets {
- return
- }
// don't count brackets as progress
width -= brackets
+ if width < 0 {
+ return
+ }
w.Write(s.components[iLbound].bytes)
defer w.Write(s.components[iRbound].bytes)
- curWidth := int(internal.PercentageRound(stat.Total, stat.Current, width))
- refWidth, filled := 0, curWidth
- filling := make([][]byte, 0, curWidth)
-
- if curWidth > 0 && curWidth != width {
- tipFrame := s.tip.frames[s.tip.count%uint(len(s.tip.frames))]
- filling = append(filling, tipFrame.bytes)
- curWidth -= tipFrame.width
- s.tip.count++
+ if width == 0 {
+ return
}
- if stat.Refill > 0 && curWidth > 0 {
- refWidth = int(internal.PercentageRound(stat.Total, int64(stat.Refill), width))
- if refWidth > curWidth {
- refWidth = curWidth
- }
- curWidth -= refWidth
+ var filling [][]byte
+ var padding [][]byte
+ var tip *component
+ var filled int
+ var refWidth int
+ curWidth := int(internal.PercentageRound(stat.Total, stat.Current, uint(width)))
+
+ if stat.Current >= stat.Total {
+ tip = s.tip.onComplete
+ } else {
+ tip = s.tip.frames[s.tip.count%uint(len(s.tip.frames))]
}
- for curWidth > 0 && curWidth >= s.components[iFiller].width {
- filling = append(filling, s.components[iFiller].bytes)
- curWidth -= s.components[iFiller].width
- if s.components[iFiller].width == 0 {
- break
- }
+ if curWidth > 0 {
+ filling = append(filling, tip.bytes)
+ filled += tip.width
+ s.tip.count++
}
- for refWidth > 0 && refWidth >= s.components[iRefiller].width {
- filling = append(filling, s.components[iRefiller].bytes)
- refWidth -= s.components[iRefiller].width
- if s.components[iRefiller].width == 0 {
- break
+ if stat.Refill > 0 {
+ refWidth = int(internal.PercentageRound(stat.Total, stat.Refill, uint(width)))
+ curWidth -= refWidth
+ refWidth += curWidth
+ }
+
+ for filled < curWidth {
+ if curWidth-filled >= s.components[iFiller].width {
+ filling = append(filling, s.components[iFiller].bytes)
+ if s.components[iFiller].width == 0 {
+ break
+ }
+ filled += s.components[iFiller].width
+ } else {
+ filling = append(filling, []byte("…"))
+ filled++
}
}
- filled -= curWidth + refWidth
- padWidth := width - filled
- padding := make([][]byte, 0, padWidth)
- for padWidth > 0 && padWidth >= s.components[iPadding].width {
- padding = append(padding, s.components[iPadding].bytes)
- padWidth -= s.components[iPadding].width
- if s.components[iPadding].width == 0 {
- break
+ for filled < refWidth {
+ if refWidth-filled >= s.components[iRefiller].width {
+ filling = append(filling, s.components[iRefiller].bytes)
+ if s.components[iRefiller].width == 0 {
+ break
+ }
+ filled += s.components[iRefiller].width
+ } else {
+ filling = append(filling, []byte("…"))
+ filled++
}
}
+ padWidth := width - filled
for padWidth > 0 {
- padding = append(padding, []byte("…"))
- padWidth--
+ if padWidth >= s.components[iPadding].width {
+ padding = append(padding, s.components[iPadding].bytes)
+ if s.components[iPadding].width == 0 {
+ break
+ }
+ padWidth -= s.components[iPadding].width
+ } else {
+ padding = append(padding, []byte("…"))
+ padWidth--
+ }
}
s.flush(w, filling, padding)
diff --git a/vendor/github.com/vbauerster/mpb/v7/container_option.go b/vendor/github.com/vbauerster/mpb/v7/container_option.go
index e4254f662..a858c3c51 100644
--- a/vendor/github.com/vbauerster/mpb/v7/container_option.go
+++ b/vendor/github.com/vbauerster/mpb/v7/container_option.go
@@ -62,7 +62,11 @@ func WithRenderDelay(ch <-chan struct{}) ContainerOption {
// have been rendered.
func WithShutdownNotifier(ch chan struct{}) ContainerOption {
return func(s *pState) {
- s.shutdownNotifier = ch
+ select {
+ case <-ch:
+ default:
+ s.shutdownNotifier = ch
+ }
}
}
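The new guard declines a notifier channel that is already closed (or already holds a value): adopting one would make the library panic later when it closes the channel on shutdown. The non-blocking probe in isolation:

package example

// adoptable reports whether ch is safe to adopt as a shutdown notifier: a
// receive that succeeds immediately means the channel is already closed or
// signalled, and closing it again would panic.
func adoptable(ch chan struct{}) bool {
	select {
	case <-ch:
		return false
	default:
		return true
	}
}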
diff --git a/vendor/github.com/vbauerster/mpb/v7/cwriter/writer.go b/vendor/github.com/vbauerster/mpb/v7/cwriter/writer.go
index 1ade54761..925c8b1dc 100644
--- a/vendor/github.com/vbauerster/mpb/v7/cwriter/writer.go
+++ b/vendor/github.com/vbauerster/mpb/v7/cwriter/writer.go
@@ -22,7 +22,7 @@ const (
type Writer struct {
out io.Writer
buf bytes.Buffer
- lineCount int
+ lines int
fd int
isTerminal bool
}
@@ -38,15 +38,15 @@ func New(out io.Writer) *Writer {
}
// Flush flushes the underlying buffer.
-func (w *Writer) Flush(lineCount int) (err error) {
+func (w *Writer) Flush(lines int) (err error) {
// some terminals interpret 'cursor up 0' as 'cursor up 1'
- if w.lineCount > 0 {
+ if w.lines > 0 {
err = w.clearLines()
if err != nil {
return
}
}
- w.lineCount = lineCount
+ w.lines = lines
_, err = w.buf.WriteTo(w.out)
return
}
@@ -78,7 +78,7 @@ func (w *Writer) GetWidth() (int, error) {
func (w *Writer) ansiCuuAndEd() (err error) {
buf := make([]byte, 8)
- buf = strconv.AppendInt(buf[:copy(buf, escOpen)], int64(w.lineCount), 10)
+ buf = strconv.AppendInt(buf[:copy(buf, escOpen)], int64(w.lines), 10)
_, err = w.out.Write(append(buf, cuuAndEd...))
return
}
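For reference, the bytes this method assembles amount to the standard ANSI "cursor up n" (CUU) sequence followed by "erase below" (ED), assuming escOpen and cuuAndEd hold those escape strings; the whole method is then equivalent to:

package example

import (
	"fmt"
	"io"
)

// clearBack moves the cursor up n lines and erases everything from there to
// the end of the screen; the escape strings are the standard CUU ("\x1b[nA")
// and ED ("\x1b[J") sequences assumed above.
func clearBack(out io.Writer, n int) error {
	_, err := fmt.Fprintf(out, "\x1b[%dA\x1b[J", n)
	return err
}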
diff --git a/vendor/github.com/vbauerster/mpb/v7/cwriter/writer_windows.go b/vendor/github.com/vbauerster/mpb/v7/cwriter/writer_windows.go
index 1a69c81ac..8f99dbe32 100644
--- a/vendor/github.com/vbauerster/mpb/v7/cwriter/writer_windows.go
+++ b/vendor/github.com/vbauerster/mpb/v7/cwriter/writer_windows.go
@@ -26,7 +26,7 @@ func (w *Writer) clearLines() error {
return err
}
- info.CursorPosition.Y -= int16(w.lineCount)
+ info.CursorPosition.Y -= int16(w.lines)
if info.CursorPosition.Y < 0 {
info.CursorPosition.Y = 0
}
@@ -40,7 +40,7 @@ func (w *Writer) clearLines() error {
X: info.Window.Left,
Y: info.CursorPosition.Y,
}
- count := uint32(info.Size.X) * uint32(w.lineCount)
+ count := uint32(info.Size.X) * uint32(w.lines)
_, _, _ = procFillConsoleOutputCharacter.Call(
uintptr(w.fd),
uintptr(' '),
diff --git a/vendor/github.com/vbauerster/mpb/v7/go.mod b/vendor/github.com/vbauerster/mpb/v7/go.mod
index 22a2c651c..7b177d0db 100644
--- a/vendor/github.com/vbauerster/mpb/v7/go.mod
+++ b/vendor/github.com/vbauerster/mpb/v7/go.mod
@@ -4,7 +4,7 @@ require (
github.com/VividCortex/ewma v1.2.0
github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d
github.com/mattn/go-runewidth v0.0.13
- golang.org/x/sys v0.0.0-20210616094352-59db8d763f22
+ golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e
)
go 1.14
diff --git a/vendor/github.com/vbauerster/mpb/v7/go.sum b/vendor/github.com/vbauerster/mpb/v7/go.sum
index 59051bd7b..45584e0bf 100644
--- a/vendor/github.com/vbauerster/mpb/v7/go.sum
+++ b/vendor/github.com/vbauerster/mpb/v7/go.sum
@@ -6,5 +6,5 @@ github.com/mattn/go-runewidth v0.0.13 h1:lTGmDsbAYt5DmK6OnoV7EuIF1wEIFAcxld6ypU4
github.com/mattn/go-runewidth v0.0.13/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
github.com/rivo/uniseg v0.2.0 h1:S1pD9weZBuJdFmowNwbpi7BJ8TNftyUImj/0WQi72jY=
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
-golang.org/x/sys v0.0.0-20210616094352-59db8d763f22 h1:RqytpXGR1iVNX7psjB3ff8y7sNFinVFvkx1c8SjBkio=
-golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e h1:WUoyKPm6nCo1BnNUvPGnFG3T5DUVem42yDJZZ4CNxMA=
+golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
diff --git a/vendor/github.com/vbauerster/mpb/v7/internal/percentage.go b/vendor/github.com/vbauerster/mpb/v7/internal/percentage.go
index a8ef8be12..4bc36f5ba 100644
--- a/vendor/github.com/vbauerster/mpb/v7/internal/percentage.go
+++ b/vendor/github.com/vbauerster/mpb/v7/internal/percentage.go
@@ -3,7 +3,7 @@ package internal
import "math"
// Percentage is a helper function to calculate a percentage.
-func Percentage(total, current int64, width int) float64 {
+func Percentage(total, current int64, width uint) float64 {
if total <= 0 {
return 0
}
@@ -14,6 +14,6 @@ func Percentage(total, current int64, width int) float64 {
}
// PercentageRound is the same as Percentage but with math.Round.
-func PercentageRound(total, current int64, width int) float64 {
+func PercentageRound(total, current int64, width uint) float64 {
return math.Round(Percentage(total, current, width))
}
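A quick worked example of the contract after the int-to-uint change: with total=200, current=50 and width=80 the filled portion is round(80 * 50/200) = 20 cells. Restated outside the (unimportable) internal package; the clamping branch is assumed from the function's visible contract, since the hunk elides it:

package main

import (
	"fmt"
	"math"
)

// percentageRound restates the helper: the share of width cells that
// current/total represents, rounded to the nearest whole cell.
func percentageRound(total, current int64, width uint) float64 {
	if total <= 0 {
		return 0
	}
	if current >= total { // assumed clamp, elided from the hunk above
		return float64(width)
	}
	return math.Round(float64(current) / float64(total) * float64(width))
}

func main() {
	fmt.Println(percentageRound(200, 50, 80)) // 20
}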
diff --git a/vendor/github.com/vbauerster/mpb/v7/progress.go b/vendor/github.com/vbauerster/mpb/v7/progress.go
index b2017f3f0..c60c65694 100644
--- a/vendor/github.com/vbauerster/mpb/v7/progress.go
+++ b/vendor/github.com/vbauerster/mpb/v7/progress.go
@@ -19,7 +19,7 @@ import (
const (
// default RefreshRate
- prr = 120 * time.Millisecond
+ prr = 150 * time.Millisecond
)
// Progress represents a container that renders one or more progress
@@ -157,27 +157,40 @@ func (p *Progress) dropBar(b *Bar) {
}
}
-func (p *Progress) setBarPriority(b *Bar, priority int) {
+func (p *Progress) traverseBars(cb func(b *Bar) bool) {
+ done := make(chan struct{})
select {
case p.operateState <- func(s *pState) {
- if b.index < 0 {
- return
+ for i := 0; i < s.bHeap.Len(); i++ {
+ bar := s.bHeap[i]
+ if !cb(bar) {
+ break
+ }
}
- b.priority = priority
- heap.Fix(&s.bHeap, b.index)
+ close(done)
}:
+ <-done
case <-p.done:
}
}
// UpdateBarPriority is the same as *Bar.SetPriority(int).
func (p *Progress) UpdateBarPriority(b *Bar, priority int) {
- p.setBarPriority(b, priority)
+ select {
+ case p.operateState <- func(s *pState) {
+ if b.index < 0 {
+ return
+ }
+ b.priority = priority
+ heap.Fix(&s.bHeap, b.index)
+ }:
+ case <-p.done:
+ }
}
// BarCount returns the number of bars.
func (p *Progress) BarCount() int {
- result := make(chan int, 1)
+ result := make(chan int)
select {
case p.operateState <- func(s *pState) { result <- s.bHeap.Len() }:
return <-result
@@ -222,7 +235,7 @@ func (p *Progress) serve(s *pState, cw *cwriter.Writer) {
p.dlogger.Println(err)
}
case <-s.shutdownNotifier:
- if s.heapUpdated {
+ for s.heapUpdated {
if err := s.render(cw); err != nil {
p.dlogger.Println(err)
}
@@ -291,11 +304,12 @@ func (s *pState) render(cw *cwriter.Writer) error {
}
func (s *pState) flush(cw *cwriter.Writer) error {
- var lineCount int
- bm := make(map[*Bar]struct{}, s.bHeap.Len())
+ var totalLines int
+ bm := make(map[*Bar]int, s.bHeap.Len())
for s.bHeap.Len() > 0 {
b := heap.Pop(&s.bHeap).(*Bar)
- cw.ReadFrom(<-b.frameCh)
+ frame := <-b.frameCh
+ cw.ReadFrom(frame.reader)
if b.toShutdown {
if b.recoveredPanic != nil {
s.barShutdownQueue = append(s.barShutdownQueue, b)
@@ -308,8 +322,8 @@ func (s *pState) flush(cw *cwriter.Writer) error {
}()
}
}
- lineCount += b.extendedLines + 1
- bm[b] = struct{}{}
+ bm[b] = frame.lines
+ totalLines += frame.lines
}
for _, b := range s.barShutdownQueue {
@@ -320,7 +334,7 @@ func (s *pState) flush(cw *cwriter.Writer) error {
b.toDrop = true
}
if s.popCompleted && !b.noPop {
- lineCount -= b.extendedLines + 1
+ totalLines -= bm[b]
b.toDrop = true
}
if b.toDrop {
@@ -335,7 +349,7 @@ func (s *pState) flush(cw *cwriter.Writer) error {
heap.Push(&s.bHeap, b)
}
- return cw.Flush(lineCount)
+ return cw.Flush(totalLines)
}
func (s *pState) updateSyncMatrix() {