From ba89a058882f1027226943fe2ef614930ab60f8e Mon Sep 17 00:00:00 2001 From: Daniel J Walsh Date: Sat, 5 Jan 2019 13:41:44 -0500 Subject: Vendor in latest containers/buildah code This should improve the speed of podman build. Has fixes from containers/image for parallel pull. Also vendor containers/storage and containers/image Signed-off-by: Daniel J Walsh --- vendor/github.com/containers/buildah/config.go | 3 - vendor/github.com/containers/buildah/image.go | 4 + .../containers/buildah/imagebuildah/build.go | 7 +- .../buildah/imagebuildah/chroot_symlink.go | 5 + .../containers/buildah/pkg/blobcache/blobcache.go | 8 + .../containers/buildah/pkg/cli/common.go | 4 + vendor/github.com/containers/buildah/run.go | 16 ++ .../containers/buildah/unshare/unshare.c | 3 +- vendor/github.com/containers/buildah/util/util.go | 50 +----- vendor/github.com/containers/buildah/vendor.conf | 14 +- vendor/github.com/containers/image/copy/copy.go | 145 ++++++++++++---- .../containers/image/directory/directory_dest.go | 5 + .../containers/image/directory/directory_src.go | 5 + .../containers/image/docker/docker_client.go | 35 +++- .../containers/image/docker/docker_image_dest.go | 5 + .../containers/image/docker/docker_image_src.go | 5 + .../containers/image/docker/tarfile/dest.go | 5 + .../containers/image/docker/tarfile/src.go | 5 + .../containers/image/oci/archive/oci_dest.go | 5 + .../containers/image/oci/archive/oci_src.go | 5 + .../containers/image/oci/layout/oci_dest.go | 5 + .../containers/image/oci/layout/oci_src.go | 5 + .../containers/image/openshift/openshift.go | 10 ++ .../containers/image/ostree/ostree_dest.go | 9 +- .../containers/image/ostree/ostree_src.go | 9 +- .../containers/image/pkg/blobinfocache/default.go | 2 +- .../image/pkg/compression/compression.go | 4 +- .../pkg/sysregistriesv2/system_registries_v2.go | 35 +++- .../containers/image/storage/storage_image.go | 28 ++- .../containers/image/tarball/tarball_src.go | 10 +- vendor/github.com/containers/image/types/types.go | 4 + vendor/github.com/containers/image/vendor.conf | 10 +- vendor/github.com/opencontainers/go-digest/LICENSE | 191 ++++++++++++++++++++ .../opencontainers/go-digest/LICENSE.code | 191 -------------------- .../github.com/opencontainers/go-digest/README.md | 2 +- .../opencontainers/go-digest/algorithm.go | 40 ++++- .../github.com/opencontainers/go-digest/digest.go | 42 ++--- vendor/golang.org/x/sync/LICENSE | 27 +++ vendor/golang.org/x/sync/PATENTS | 22 +++ vendor/golang.org/x/sync/README.md | 18 ++ vendor/golang.org/x/sync/semaphore/semaphore.go | 127 ++++++++++++ vendor/gopkg.in/cheggaaa/pb.v1/README.md | 11 +- vendor/gopkg.in/cheggaaa/pb.v1/format.go | 84 ++++++--- vendor/gopkg.in/cheggaaa/pb.v1/pb.go | 193 ++++++++++++++------- vendor/gopkg.in/cheggaaa/pb.v1/pb_appengine.go | 2 +- vendor/gopkg.in/cheggaaa/pb.v1/pb_nix.go | 8 - vendor/gopkg.in/cheggaaa/pb.v1/pb_solaris.go | 6 - vendor/gopkg.in/cheggaaa/pb.v1/pb_win.go | 4 +- vendor/gopkg.in/cheggaaa/pb.v1/pb_x.go | 118 +++++++------ vendor/gopkg.in/cheggaaa/pb.v1/pool.go | 62 +++++-- vendor/gopkg.in/cheggaaa/pb.v1/pool_win.go | 16 +- vendor/gopkg.in/cheggaaa/pb.v1/pool_x.go | 13 +- vendor/gopkg.in/cheggaaa/pb.v1/runecount.go | 2 +- vendor/gopkg.in/cheggaaa/pb.v1/termios_nix.go | 7 - vendor/gopkg.in/cheggaaa/pb.v1/termios_sysv.go | 13 ++ 55 files changed, 1128 insertions(+), 536 deletions(-) create mode 100644 vendor/github.com/opencontainers/go-digest/LICENSE delete mode 100644 vendor/github.com/opencontainers/go-digest/LICENSE.code create mode 100644 
vendor/golang.org/x/sync/LICENSE create mode 100644 vendor/golang.org/x/sync/PATENTS create mode 100644 vendor/golang.org/x/sync/README.md create mode 100644 vendor/golang.org/x/sync/semaphore/semaphore.go delete mode 100644 vendor/gopkg.in/cheggaaa/pb.v1/pb_nix.go delete mode 100644 vendor/gopkg.in/cheggaaa/pb.v1/pb_solaris.go delete mode 100644 vendor/gopkg.in/cheggaaa/pb.v1/termios_nix.go create mode 100644 vendor/gopkg.in/cheggaaa/pb.v1/termios_sysv.go (limited to 'vendor') diff --git a/vendor/github.com/containers/buildah/config.go b/vendor/github.com/containers/buildah/config.go index 3609694f6..114b8ca37 100644 --- a/vendor/github.com/containers/buildah/config.go +++ b/vendor/github.com/containers/buildah/config.go @@ -474,9 +474,6 @@ func (b *Builder) Hostname() string { // Note: this setting is not present in the OCIv1 image format, so it is // discarded when writing images using OCIv1 formats. func (b *Builder) SetHostname(name string) { - if name != "" && b.Format != Dockerv2ImageManifest { - logrus.Errorf("HOSTNAME is not supported for OCI image format, hostname %s will be ignored. Must use `docker` format", name) - } b.Docker.Config.Hostname = name } diff --git a/vendor/github.com/containers/buildah/image.go b/vendor/github.com/containers/buildah/image.go index 163d34269..47842db7d 100644 --- a/vendor/github.com/containers/buildah/image.go +++ b/vendor/github.com/containers/buildah/image.go @@ -557,6 +557,10 @@ func (i *containerImageSource) LayerInfosForCopy(ctx context.Context) ([]types.B return nil, nil } +func (i *containerImageSource) HasThreadSafeGetBlob() bool { + return false +} + func (i *containerImageSource) GetBlob(ctx context.Context, blob types.BlobInfo, cache types.BlobInfoCache) (reader io.ReadCloser, size int64, err error) { if blob.Digest == i.configDigest { logrus.Debugf("start reading config") diff --git a/vendor/github.com/containers/buildah/imagebuildah/build.go b/vendor/github.com/containers/buildah/imagebuildah/build.go index 2681bc198..d838260e7 100644 --- a/vendor/github.com/containers/buildah/imagebuildah/build.go +++ b/vendor/github.com/containers/buildah/imagebuildah/build.go @@ -1307,7 +1307,12 @@ func (b *Executor) Build(ctx context.Context, stages imagebuilder.Stages) (strin var imageRef reference.Canonical imageID := "" - if !b.layers && !b.noCache { + + // Check if we have a one line Dockerfile making layers irrelevant + // or the user told us to ignore layers. 
+ ignoreLayers := (len(stages) < 2 && len(stages[0].Node.Children) < 2) || (!b.layers && !b.noCache) + + if ignoreLayers { imgID, ref, err := stageExecutor.Commit(ctx, stages[len(stages)-1].Builder, "") if err != nil { return "", nil, err diff --git a/vendor/github.com/containers/buildah/imagebuildah/chroot_symlink.go b/vendor/github.com/containers/buildah/imagebuildah/chroot_symlink.go index edb5837db..6feedf6a5 100644 --- a/vendor/github.com/containers/buildah/imagebuildah/chroot_symlink.go +++ b/vendor/github.com/containers/buildah/imagebuildah/chroot_symlink.go @@ -131,6 +131,11 @@ func resolveModifiedTime(rootdir, filename, historyTime string) (bool, error) { func modTimeIsGreater(rootdir, path string, historyTime string) (bool, error) { var timeIsGreater bool + // the Walk below doesn't work if rootdir and path are equal + if rootdir == path { + return false, nil + } + // Convert historyTime from string to time.Time for comparison histTime, err := time.Parse(time.RFC3339Nano, historyTime) if err != nil { diff --git a/vendor/github.com/containers/buildah/pkg/blobcache/blobcache.go b/vendor/github.com/containers/buildah/pkg/blobcache/blobcache.go index 63022f15d..ae55316b0 100644 --- a/vendor/github.com/containers/buildah/pkg/blobcache/blobcache.go +++ b/vendor/github.com/containers/buildah/pkg/blobcache/blobcache.go @@ -218,6 +218,10 @@ func (s *blobCacheSource) GetManifest(ctx context.Context, instanceDigest *diges return s.source.GetManifest(ctx, instanceDigest) } +func (s *blobCacheSource) HasThreadSafeGetBlob() bool { + return false +} + func (s *blobCacheSource) GetBlob(ctx context.Context, blobinfo types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) { present, size, err := s.reference.HasBlob(blobinfo) if err != nil { @@ -398,6 +402,10 @@ func saveStream(wg *sync.WaitGroup, decompressReader io.ReadCloser, tempFile *os } } +func (s *blobCacheDestination) HasThreadSafePutBlob() bool { + return false +} + func (d *blobCacheDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) { var tempfile *os.File var err error diff --git a/vendor/github.com/containers/buildah/pkg/cli/common.go b/vendor/github.com/containers/buildah/pkg/cli/common.go index e3a4fe62a..dc1b2bc69 100644 --- a/vendor/github.com/containers/buildah/pkg/cli/common.go +++ b/vendor/github.com/containers/buildah/pkg/cli/common.go @@ -148,6 +148,10 @@ var ( Name: "loglevel", Usage: "adjust logging level (range from -2 to 3)", }, + cli.StringFlag{ + Name: "platform", + Usage: "CLI compatibility: no action or effect", + }, cli.BoolTFlag{ Name: "pull", Usage: "pull the image if not present", diff --git a/vendor/github.com/containers/buildah/run.go b/vendor/github.com/containers/buildah/run.go index 5d2cd6a32..a0627f489 100644 --- a/vendor/github.com/containers/buildah/run.go +++ b/vendor/github.com/containers/buildah/run.go @@ -26,6 +26,7 @@ import ( "github.com/containers/storage/pkg/idtools" "github.com/containers/storage/pkg/ioutils" "github.com/containers/storage/pkg/reexec" + "github.com/containers/storage/pkg/stringid" units "github.com/docker/go-units" digest "github.com/opencontainers/go-digest" "github.com/opencontainers/runtime-spec/specs-go" @@ -944,10 +945,25 @@ func (b *Builder) configureNamespaces(g *generate.Generator, options RunOptions) g.SetHostname(options.Hostname) } else if b.Hostname() != "" { g.SetHostname(b.Hostname()) + } else { + g.SetHostname(stringid.TruncateID(b.ContainerID)) } } 
else { g.SetHostname("") } + + found := false + spec := g.Spec() + for i := range spec.Process.Env { + if strings.HasPrefix(spec.Process.Env[i], "HOSTNAME=") { + found = true + break + } + } + if !found { + spec.Process.Env = append(spec.Process.Env, fmt.Sprintf("HOSTNAME=%s", spec.Hostname)) + } + return configureNetwork, configureNetworks, nil } diff --git a/vendor/github.com/containers/buildah/unshare/unshare.c b/vendor/github.com/containers/buildah/unshare/unshare.c index 6873e49c2..3865e414f 100644 --- a/vendor/github.com/containers/buildah/unshare/unshare.c +++ b/vendor/github.com/containers/buildah/unshare/unshare.c @@ -79,7 +79,8 @@ void _buildah_unshare(void) pidfd = _buildah_unshare_parse_envint("_Buildah-pid-pipe"); if (pidfd != -1) { snprintf(buf, sizeof(buf), "%llu", (unsigned long long) getpid()); - if (write(pidfd, buf, strlen(buf)) != strlen(buf)) { + size_t size = write(pidfd, buf, strlen(buf)); + if (size != strlen(buf)) { fprintf(stderr, "Error writing PID to pipe on fd %d: %m\n", pidfd); _exit(1); } diff --git a/vendor/github.com/containers/buildah/util/util.go b/vendor/github.com/containers/buildah/util/util.go index 427c8db28..f4cac522e 100644 --- a/vendor/github.com/containers/buildah/util/util.go +++ b/vendor/github.com/containers/buildah/util/util.go @@ -395,57 +395,11 @@ func GetSubIDMappings(user, group string) ([]specs.LinuxIDMapping, []specs.Linux // ParseIDMappings parses mapping triples. func ParseIDMappings(uidmap, gidmap []string) ([]idtools.IDMap, []idtools.IDMap, error) { - nonDigitsToWhitespace := func(r rune) rune { - if strings.IndexRune("0123456789", r) == -1 { - return ' ' - } else { - return r - } - } - parseTriple := func(spec []string) (container, host, size uint32, err error) { - cid, err := strconv.ParseUint(spec[0], 10, 32) - if err != nil { - return 0, 0, 0, fmt.Errorf("error parsing id map value %q: %v", spec[0], err) - } - hid, err := strconv.ParseUint(spec[1], 10, 32) - if err != nil { - return 0, 0, 0, fmt.Errorf("error parsing id map value %q: %v", spec[1], err) - } - sz, err := strconv.ParseUint(spec[2], 10, 32) - if err != nil { - return 0, 0, 0, fmt.Errorf("error parsing id map value %q: %v", spec[2], err) - } - return uint32(cid), uint32(hid), uint32(sz), nil - } - parseIDMap := func(mapSpec []string, mapSetting string) (idmap []idtools.IDMap, err error) { - for _, idMapSpec := range mapSpec { - idSpec := strings.Fields(strings.Map(nonDigitsToWhitespace, idMapSpec)) - if len(idSpec)%3 != 0 { - return nil, errors.Errorf("error initializing ID mappings: %s setting is malformed", mapSetting) - } - for i := range idSpec { - if i%3 != 0 { - continue - } - cid, hid, size, err := parseTriple(idSpec[i : i+3]) - if err != nil { - return nil, errors.Errorf("error initializing ID mappings: %s setting is malformed", mapSetting) - } - mapping := idtools.IDMap{ - ContainerID: int(cid), - HostID: int(hid), - Size: int(size), - } - idmap = append(idmap, mapping) - } - } - return idmap, nil - } - uid, err := parseIDMap(uidmap, "userns-uid-map") + uid, err := idtools.ParseIDMap(uidmap, "userns-uid-map") if err != nil { return nil, nil, err } - gid, err := parseIDMap(gidmap, "userns-gid-map") + gid, err := idtools.ParseIDMap(gidmap, "userns-gid-map") if err != nil { return nil, nil, err } diff --git a/vendor/github.com/containers/buildah/vendor.conf b/vendor/github.com/containers/buildah/vendor.conf index 61325114c..ee8192935 100644 --- a/vendor/github.com/containers/buildah/vendor.conf +++ b/vendor/github.com/containers/buildah/vendor.conf @@ -3,10 
+3,10 @@ github.com/blang/semver master github.com/BurntSushi/toml master github.com/containerd/continuity master github.com/containernetworking/cni v0.7.0-alpha1 -github.com/containers/image d53afe179b381fafb427e6b9cf9b1996a98c1067 +github.com/containers/image 0c6cc8e1420001ae39fa89d308b3b3bc5ee81c57 github.com/boltdb/bolt master -github.com/containers/libpod fe4f09493f41f675d24c969d1b60d1a6a45ddb9e -github.com/containers/storage db40f96d853dfced60c563e61fb66ba231ce7c8d +github.com/containers/libpod c8eaf59d5f4bec249db8134c6a9fcfbcac792519 +github.com/containers/storage 60a692f7ce891feb91ce0eda87bd06bfd5651dff github.com/docker/distribution 5f6282db7d65e6d72ad7c2cc66310724a57be716 github.com/docker/docker 86f080cff0914e9694068ed78d503701667c4c00 github.com/docker/docker-credential-helpers d68f9aeca33f5fd3f08eeae5e9d175edf4e731d1 @@ -32,7 +32,7 @@ github.com/mistifyio/go-zfs master github.com/moby/moby f8806b18b4b92c5e1980f6e11c917fad201cd73c github.com/mtrmac/gpgme master github.com/Nvveen/Gotty master -github.com/opencontainers/go-digest aa2ec055abd10d26d539eb630a92241b781ce4bc +github.com/opencontainers/go-digest c9281466c8b2f606084ac71339773efd177436e7 github.com/opencontainers/image-spec v1.0.0 github.com/opencontainers/runc master github.com/opencontainers/runtime-spec v1.0.0 @@ -55,10 +55,14 @@ github.com/xeipuuv/gojsonreference master github.com/xeipuuv/gojsonschema master golang.org/x/crypto master golang.org/x/net master +golang.org/x/sync 42b317875d0fa942474b76e1b46a6060d720ae6e golang.org/x/sys master golang.org/x/text master -gopkg.in/cheggaaa/pb.v1 v1.0.13 +gopkg.in/cheggaaa/pb.v1 v1.0.27 gopkg.in/yaml.v2 cd8b52f8269e0feb286dfeef29f8fe4d5b397e0b k8s.io/apimachinery master k8s.io/client-go master k8s.io/kubernetes master +github.com/klauspost/pgzip v1.2.1 +github.com/klauspost/compress v1.4.1 +github.com/klauspost/cpuid v1.2.0 diff --git a/vendor/github.com/containers/image/copy/copy.go b/vendor/github.com/containers/image/copy/copy.go index 013080e8d..89c7e580f 100644 --- a/vendor/github.com/containers/image/copy/copy.go +++ b/vendor/github.com/containers/image/copy/copy.go @@ -2,7 +2,6 @@ package copy import ( "bytes" - "compress/gzip" "context" "fmt" "io" @@ -10,6 +9,7 @@ import ( "reflect" "runtime" "strings" + "sync" "time" "github.com/containers/image/image" @@ -18,9 +18,11 @@ import ( "github.com/containers/image/signature" "github.com/containers/image/transports" "github.com/containers/image/types" + "github.com/klauspost/pgzip" "github.com/opencontainers/go-digest" "github.com/pkg/errors" "github.com/sirupsen/logrus" + "golang.org/x/sync/semaphore" pb "gopkg.in/cheggaaa/pb.v1" ) @@ -32,6 +34,10 @@ type digestingReader struct { validationSucceeded bool } +// maxParallelDownloads is used to limit the maxmimum number of parallel +// downloads. Let's follow Firefox by limiting it to 6. +var maxParallelDownloads = 6 + // newDigestingReader returns an io.Reader implementation with contents of source, which will eventually return a non-EOF error // or set validationSucceeded/validationFailed to true if the source stream does/does not match expectedDigest. // (neither is set if EOF is never reached). 
@@ -81,6 +87,7 @@ type copier struct { progressInterval time.Duration progress chan types.ProgressProperties blobInfoCache types.BlobInfoCache + copyInParallel bool } // imageCopier tracks state specific to a single image (possibly an item of a manifest list) @@ -145,12 +152,14 @@ func Image(ctx context.Context, policyContext *signature.PolicyContext, destRef, } }() + copyInParallel := dest.HasThreadSafePutBlob() && rawSource.HasThreadSafeGetBlob() c := &copier{ dest: dest, rawSource: rawSource, reportWriter: reportWriter, progressInterval: options.ProgressInterval, progress: options.Progress, + copyInParallel: copyInParallel, // FIXME? The cache is used for sources and destinations equally, but we only have a SourceCtx and DestinationCtx. // For now, use DestinationCtx (because blob reuse changes the behavior of the destination side more); eventually // we might want to add a separate CommonCtx — or would that be too confusing? @@ -380,11 +389,26 @@ func (ic *imageCopier) updateEmbeddedDockerReference() error { return nil } +// shortDigest returns the first 12 characters of the digest. +func shortDigest(d digest.Digest) string { + return d.Encoded()[:12] +} + +// createProgressBar creates a pb.ProgressBar. +func createProgressBar(srcInfo types.BlobInfo, kind string, writer io.Writer) *pb.ProgressBar { + bar := pb.New(int(srcInfo.Size)).SetUnits(pb.U_BYTES) + bar.SetMaxWidth(80) + bar.ShowTimeLeft = false + bar.ShowPercent = false + bar.Prefix(fmt.Sprintf("Copying %s %s:", kind, shortDigest(srcInfo.Digest))) + bar.Output = writer + return bar +} + // copyLayers copies layers from ic.src/ic.c.rawSource to dest, using and updating ic.manifestUpdates if necessary and ic.canModifyManifest. func (ic *imageCopier) copyLayers(ctx context.Context) error { srcInfos := ic.src.LayerInfos() - destInfos := []types.BlobInfo{} - diffIDs := []digest.Digest{} + numLayers := len(srcInfos) updatedSrcInfos, err := ic.src.LayerInfosForCopy(ctx) if err != nil { return err @@ -397,30 +421,83 @@ func (ic *imageCopier) copyLayers(ctx context.Context) error { srcInfos = updatedSrcInfos srcInfosUpdated = true } - for _, srcLayer := range srcInfos { - var ( - destInfo types.BlobInfo - diffID digest.Digest - err error - ) + + type copyLayerData struct { + destInfo types.BlobInfo + diffID digest.Digest + err error + } + + // copyGroup is used to determine if all layers are copied + copyGroup := sync.WaitGroup{} + copyGroup.Add(numLayers) + // copySemaphore is used to limit the number of parallel downloads to + // avoid malicious images causing troubles and to be nice to servers. + var copySemaphore *semaphore.Weighted + if ic.c.copyInParallel { + copySemaphore = semaphore.NewWeighted(int64(maxParallelDownloads)) + } else { + copySemaphore = semaphore.NewWeighted(int64(1)) + } + + data := make([]copyLayerData, numLayers) + copyLayerHelper := func(index int, srcLayer types.BlobInfo, bar *pb.ProgressBar) { + defer bar.Finish() + defer copySemaphore.Release(1) + defer copyGroup.Done() + cld := copyLayerData{} if ic.c.dest.AcceptsForeignLayerURLs() && len(srcLayer.URLs) != 0 { // DiffIDs are, currently, needed only when converting from schema1. // In which case src.LayerInfos will not have URLs because schema1 // does not support them. 
if ic.diffIDsAreNeeded { - return errors.New("getting DiffID for foreign layers is unimplemented") + cld.err = errors.New("getting DiffID for foreign layers is unimplemented") + bar.Prefix(fmt.Sprintf("Skipping blob %s (DiffID foreign layer unimplemented):", shortDigest(srcLayer.Digest))) + bar.Finish() + } else { + cld.destInfo = srcLayer + logrus.Debugf("Skipping foreign layer %q copy to %s\n", cld.destInfo.Digest, ic.c.dest.Reference().Transport().Name()) + bar.Prefix(fmt.Sprintf("Skipping blob %s (foreign layer):", shortDigest(srcLayer.Digest))) + bar.Add64(bar.Total) + bar.Finish() } - destInfo = srcLayer - ic.c.Printf("Skipping foreign layer %q copy to %s\n", destInfo.Digest, ic.c.dest.Reference().Transport().Name()) } else { - destInfo, diffID, err = ic.copyLayer(ctx, srcLayer) - if err != nil { - return err - } + cld.destInfo, cld.diffID, cld.err = ic.copyLayer(ctx, srcLayer, bar) } - destInfos = append(destInfos, destInfo) - diffIDs = append(diffIDs, diffID) + data[index] = cld } + + progressBars := make([]*pb.ProgressBar, numLayers) + for i, srcInfo := range srcInfos { + bar := createProgressBar(srcInfo, "blob", nil) + progressBars[i] = bar + } + + progressPool := pb.NewPool(progressBars...) + progressPool.Output = ic.c.reportWriter + if err := progressPool.Start(); err != nil { + return errors.Wrapf(err, "error creating progress-bar pool") + } + + for i, srcLayer := range srcInfos { + copySemaphore.Acquire(ctx, 1) + go copyLayerHelper(i, srcLayer, progressBars[i]) + } + + destInfos := make([]types.BlobInfo, numLayers) + diffIDs := make([]digest.Digest, numLayers) + + copyGroup.Wait() + progressPool.Stop() + + for i, cld := range data { + if cld.err != nil { + return cld.err + } + destInfos[i] = cld.destInfo + diffIDs[i] = cld.diffID + } + ic.manifestUpdates.InformationOnly.LayerInfos = destInfos if ic.diffIDsAreNeeded { ic.manifestUpdates.InformationOnly.LayerDiffIDs = diffIDs @@ -487,12 +564,14 @@ func (ic *imageCopier) copyUpdatedConfigAndManifest(ctx context.Context) ([]byte func (c *copier) copyConfig(ctx context.Context, src types.Image) error { srcInfo := src.ConfigInfo() if srcInfo.Digest != "" { - c.Printf("Copying config %s\n", srcInfo.Digest) configBlob, err := src.ConfigBlob(ctx) if err != nil { return errors.Wrapf(err, "Error reading config blob %s", srcInfo.Digest) } - destInfo, err := c.copyBlobFromStream(ctx, bytes.NewReader(configBlob), srcInfo, nil, false, true) + bar := createProgressBar(srcInfo, "config", c.reportWriter) + defer bar.Finish() + bar.Start() + destInfo, err := c.copyBlobFromStream(ctx, bytes.NewReader(configBlob), srcInfo, nil, false, true, bar) if err != nil { return err } @@ -512,7 +591,7 @@ type diffIDResult struct { // copyLayer copies a layer with srcInfo (with known Digest and possibly known Size) in src to dest, perhaps compressing it if canCompress, // and returns a complete blobInfo of the copied layer, and a value for LayerDiffIDs if diffIDIsNeeded -func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo) (types.BlobInfo, digest.Digest, error) { +func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, bar *pb.ProgressBar) (types.BlobInfo, digest.Digest, error) { cachedDiffID := ic.c.blobInfoCache.UncompressedDigest(srcInfo.Digest) // May be "" diffIDIsNeeded := ic.diffIDsAreNeeded && cachedDiffID == "" @@ -523,13 +602,14 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo) (t return types.BlobInfo{}, "", errors.Wrapf(err, "Error trying to reuse blob %s at 
destination", srcInfo.Digest) } if reused { - ic.c.Printf("Skipping fetch of repeat blob %s\n", srcInfo.Digest) + bar.Prefix(fmt.Sprintf("Skipping blob %s (already present):", shortDigest(srcInfo.Digest))) + bar.Add64(bar.Total) + bar.Finish() return blobInfo, cachedDiffID, nil } } // Fallback: copy the layer, computing the diffID if we need to do so - ic.c.Printf("Copying blob %s\n", srcInfo.Digest) srcStream, srcBlobSize, err := ic.c.rawSource.GetBlob(ctx, srcInfo, ic.c.blobInfoCache) if err != nil { return types.BlobInfo{}, "", errors.Wrapf(err, "Error reading blob %s", srcInfo.Digest) @@ -537,7 +617,7 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo) (t defer srcStream.Close() blobInfo, diffIDChan, err := ic.copyLayerFromStream(ctx, srcStream, types.BlobInfo{Digest: srcInfo.Digest, Size: srcBlobSize}, - diffIDIsNeeded) + diffIDIsNeeded, bar) if err != nil { return types.BlobInfo{}, "", err } @@ -565,7 +645,7 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo) (t // perhaps compressing the stream if canCompress, // and returns a complete blobInfo of the copied blob and perhaps a <-chan diffIDResult if diffIDIsNeeded, to be read by the caller. func (ic *imageCopier) copyLayerFromStream(ctx context.Context, srcStream io.Reader, srcInfo types.BlobInfo, - diffIDIsNeeded bool) (types.BlobInfo, <-chan diffIDResult, error) { + diffIDIsNeeded bool, bar *pb.ProgressBar) (types.BlobInfo, <-chan diffIDResult, error) { var getDiffIDRecorder func(compression.DecompressorFunc) io.Writer // = nil var diffIDChan chan diffIDResult @@ -589,7 +669,7 @@ func (ic *imageCopier) copyLayerFromStream(ctx context.Context, srcStream io.Rea return pipeWriter } } - blobInfo, err := ic.c.copyBlobFromStream(ctx, srcStream, srcInfo, getDiffIDRecorder, ic.canModifyManifest, false) // Sets err to nil on success + blobInfo, err := ic.c.copyBlobFromStream(ctx, srcStream, srcInfo, getDiffIDRecorder, ic.canModifyManifest, false, bar) // Sets err to nil on success return blobInfo, diffIDChan, err // We need the defer … pipeWriter.CloseWithError() to happen HERE so that the caller can block on reading from diffIDChan } @@ -626,7 +706,7 @@ func computeDiffID(stream io.Reader, decompressor compression.DecompressorFunc) // and returns a complete blobInfo of the copied blob. func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, srcInfo types.BlobInfo, getOriginalLayerCopyWriter func(decompressor compression.DecompressorFunc) io.Writer, - canModifyBlob bool, isConfig bool) (types.BlobInfo, error) { + canModifyBlob bool, isConfig bool, bar *pb.ProgressBar) (types.BlobInfo, error) { // The copying happens through a pipeline of connected io.Readers. // === Input: srcStream @@ -649,16 +729,7 @@ func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, sr return types.BlobInfo{}, errors.Wrapf(err, "Error reading blob %s", srcInfo.Digest) } isCompressed := decompressor != nil - - // === Report progress using a pb.Reader. - bar := pb.New(int(srcInfo.Size)).SetUnits(pb.U_BYTES) - bar.Output = c.reportWriter - bar.SetMaxWidth(80) - bar.ShowTimeLeft = false - bar.ShowPercent = false - bar.Start() destStream = bar.NewProxyReader(destStream) - defer bar.Finish() // === Send a copy of the original, uncompressed, stream, to a separate path if necessary. var originalLayerReader io.Reader // DO NOT USE this other than to drain the input if no other consumer in the pipeline has done so. 
@@ -761,7 +832,7 @@ func compressGoroutine(dest *io.PipeWriter, src io.Reader) { dest.CloseWithError(err) // CloseWithError(nil) is equivalent to Close() }() - zipper := gzip.NewWriter(dest) + zipper := pgzip.NewWriter(dest) defer zipper.Close() _, err = io.Copy(zipper, src) // Sets err to nil, i.e. causes dest.Close() diff --git a/vendor/github.com/containers/image/directory/directory_dest.go b/vendor/github.com/containers/image/directory/directory_dest.go index d75c195b2..4b2ab022e 100644 --- a/vendor/github.com/containers/image/directory/directory_dest.go +++ b/vendor/github.com/containers/image/directory/directory_dest.go @@ -124,6 +124,11 @@ func (d *dirImageDestination) IgnoresEmbeddedDockerReference() bool { return false // N/A, DockerReference() returns nil. } +// HasThreadSafePutBlob indicates whether PutBlob can be executed concurrently. +func (d *dirImageDestination) HasThreadSafePutBlob() bool { + return false +} + // PutBlob writes contents of stream and returns data representing the result (with all data filled in). // inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it. // inputInfo.Size is the expected length of stream, if known. diff --git a/vendor/github.com/containers/image/directory/directory_src.go b/vendor/github.com/containers/image/directory/directory_src.go index 3625def80..59b625397 100644 --- a/vendor/github.com/containers/image/directory/directory_src.go +++ b/vendor/github.com/containers/image/directory/directory_src.go @@ -48,6 +48,11 @@ func (s *dirImageSource) GetManifest(ctx context.Context, instanceDigest *digest return m, manifest.GuessMIMEType(m), err } +// HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently. +func (s *dirImageSource) HasThreadSafeGetBlob() bool { + return false +} + // GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown). // The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided. // May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location. diff --git a/vendor/github.com/containers/image/docker/docker_client.go b/vendor/github.com/containers/image/docker/docker_client.go index 7f55dbe7f..23d2ac70f 100644 --- a/vendor/github.com/containers/image/docker/docker_client.go +++ b/vendor/github.com/containers/image/docker/docker_client.go @@ -13,6 +13,7 @@ import ( "path/filepath" "strconv" "strings" + "sync" "time" "github.com/containers/image/docker/reference" @@ -84,6 +85,7 @@ type dockerClient struct { registry string client *http.Client insecureSkipTLSVerify bool + // The following members are not set by newDockerClient and must be set by callers if needed. username string password string @@ -95,8 +97,13 @@ type dockerClient struct { scheme string // Empty value also used to indicate detectProperties() has not yet succeeded. challenges []challenge supportsSignatures bool - // Private state for setupRequestAuth - tokenCache map[string]bearerToken + + // Private state for setupRequestAuth (key: string, value: bearerToken) + tokenCache sync.Map + // detectPropertiesError caches the initial error. + detectPropertiesError error + // detectPropertiesOnce is used to execuute detectProperties() at most once in in makeRequest(). 
+ detectPropertiesOnce sync.Once } type authScope struct { @@ -262,7 +269,6 @@ func newDockerClient(sys *types.SystemContext, registry, reference string) (*doc registry: registry, client: &http.Client{Transport: tr}, insecureSkipTLSVerify: skipVerify, - tokenCache: map[string]bearerToken{}, }, nil } @@ -473,14 +479,18 @@ func (c *dockerClient) setupRequestAuth(req *http.Request) error { cacheKey = fmt.Sprintf("%s:%s", c.extraScope.remoteName, c.extraScope.actions) scopes = append(scopes, *c.extraScope) } - token, ok := c.tokenCache[cacheKey] - if !ok || time.Now().After(token.expirationTime) { + var token bearerToken + t, inCache := c.tokenCache.Load(cacheKey) + if inCache { + token = t.(bearerToken) + } + if !inCache || time.Now().After(token.expirationTime) { t, err := c.getBearerToken(req.Context(), challenge, scopes) if err != nil { return err } token = *t - c.tokenCache[cacheKey] = token + c.tokenCache.Store(cacheKey, token) } req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", token.Token)) return nil @@ -545,9 +555,9 @@ func (c *dockerClient) getBearerToken(ctx context.Context, challenge challenge, return newBearerTokenFromJSONBlob(tokenBlob) } -// detectProperties detects various properties of the registry. -// See the dockerClient documentation for members which are affected by this. -func (c *dockerClient) detectProperties(ctx context.Context) error { +// detectPropertiesHelper performs the work of detectProperties which executes +// it at most once. +func (c *dockerClient) detectPropertiesHelper(ctx context.Context) error { if c.scheme != "" { return nil } @@ -604,6 +614,13 @@ func (c *dockerClient) detectProperties(ctx context.Context) error { return err } +// detectProperties detects various properties of the registry. +// See the dockerClient documentation for members which are affected by this. +func (c *dockerClient) detectProperties(ctx context.Context) error { + c.detectPropertiesOnce.Do(func() { c.detectPropertiesError = c.detectPropertiesHelper(ctx) }) + return c.detectPropertiesError +} + // getExtensionsSignatures returns signatures from the X-Registry-Supports-Signatures API extension, // using the original data structures. func (c *dockerClient) getExtensionsSignatures(ctx context.Context, ref dockerReference, manifestDigest digest.Digest) (*extensionSignatureList, error) { diff --git a/vendor/github.com/containers/image/docker/docker_image_dest.go b/vendor/github.com/containers/image/docker/docker_image_dest.go index 2f471f648..973d160d0 100644 --- a/vendor/github.com/containers/image/docker/docker_image_dest.go +++ b/vendor/github.com/containers/image/docker/docker_image_dest.go @@ -111,6 +111,11 @@ func (c *sizeCounter) Write(p []byte) (n int, err error) { return len(p), nil } +// HasThreadSafePutBlob indicates whether PutBlob can be executed concurrently. +func (d *dockerImageDestination) HasThreadSafePutBlob() bool { + return false +} + // PutBlob writes contents of stream and returns data representing the result (with all data filled in). // inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it. // inputInfo.Size is the expected length of stream, if known. 
diff --git a/vendor/github.com/containers/image/docker/docker_image_src.go b/vendor/github.com/containers/image/docker/docker_image_src.go index fbed6297f..c88ff2f34 100644 --- a/vendor/github.com/containers/image/docker/docker_image_src.go +++ b/vendor/github.com/containers/image/docker/docker_image_src.go @@ -161,6 +161,11 @@ func getBlobSize(resp *http.Response) int64 { return size } +// HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently. +func (s *dockerImageSource) HasThreadSafeGetBlob() bool { + return true +} + // GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown). // The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided. // May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location. diff --git a/vendor/github.com/containers/image/docker/tarfile/dest.go b/vendor/github.com/containers/image/docker/tarfile/dest.go index ad8a48a03..5f30eddbc 100644 --- a/vendor/github.com/containers/image/docker/tarfile/dest.go +++ b/vendor/github.com/containers/image/docker/tarfile/dest.go @@ -82,6 +82,11 @@ func (d *Destination) IgnoresEmbeddedDockerReference() bool { return false // N/A, we only accept schema2 images where EmbeddedDockerReferenceConflicts() is always false. } +// HasThreadSafePutBlob indicates whether PutBlob can be executed concurrently. +func (d *Destination) HasThreadSafePutBlob() bool { + return false +} + // PutBlob writes contents of stream and returns data representing the result (with all data filled in). // inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it. // inputInfo.Size is the expected length of stream, if known. diff --git a/vendor/github.com/containers/image/docker/tarfile/src.go b/vendor/github.com/containers/image/docker/tarfile/src.go index d94ed9783..889e5f8e8 100644 --- a/vendor/github.com/containers/image/docker/tarfile/src.go +++ b/vendor/github.com/containers/image/docker/tarfile/src.go @@ -397,6 +397,11 @@ func (r uncompressedReadCloser) Close() error { return res } +// HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently. +func (s *Source) HasThreadSafeGetBlob() bool { + return false +} + // GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown). // The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided. // May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location. diff --git a/vendor/github.com/containers/image/oci/archive/oci_dest.go b/vendor/github.com/containers/image/oci/archive/oci_dest.go index 3997ac2ee..9571c37e2 100644 --- a/vendor/github.com/containers/image/oci/archive/oci_dest.go +++ b/vendor/github.com/containers/image/oci/archive/oci_dest.go @@ -77,6 +77,11 @@ func (d *ociArchiveImageDestination) IgnoresEmbeddedDockerReference() bool { return d.unpackedDest.IgnoresEmbeddedDockerReference() } +// HasThreadSafePutBlob indicates whether PutBlob can be executed concurrently. +func (d *ociArchiveImageDestination) HasThreadSafePutBlob() bool { + return false +} + // PutBlob writes contents of stream and returns data representing the result. // inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it. // inputInfo.Size is the expected length of stream, if known. 
diff --git a/vendor/github.com/containers/image/oci/archive/oci_src.go b/vendor/github.com/containers/image/oci/archive/oci_src.go index 084d818f7..ca74f950b 100644 --- a/vendor/github.com/containers/image/oci/archive/oci_src.go +++ b/vendor/github.com/containers/image/oci/archive/oci_src.go @@ -76,6 +76,11 @@ func (s *ociArchiveImageSource) GetManifest(ctx context.Context, instanceDigest return s.unpackedSrc.GetManifest(ctx, instanceDigest) } +// HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently. +func (s *ociArchiveImageSource) HasThreadSafeGetBlob() bool { + return false +} + // GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown). // The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided. // May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location. diff --git a/vendor/github.com/containers/image/oci/layout/oci_dest.go b/vendor/github.com/containers/image/oci/layout/oci_dest.go index b5a6e08f8..db102184d 100644 --- a/vendor/github.com/containers/image/oci/layout/oci_dest.go +++ b/vendor/github.com/containers/image/oci/layout/oci_dest.go @@ -107,6 +107,11 @@ func (d *ociImageDestination) IgnoresEmbeddedDockerReference() bool { return false // N/A, DockerReference() returns nil. } +// HasThreadSafePutBlob indicates whether PutBlob can be executed concurrently. +func (d *ociImageDestination) HasThreadSafePutBlob() bool { + return false +} + // PutBlob writes contents of stream and returns data representing the result. // inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it. // inputInfo.Size is the expected length of stream, if known. diff --git a/vendor/github.com/containers/image/oci/layout/oci_src.go b/vendor/github.com/containers/image/oci/layout/oci_src.go index 086a7040d..cc536f69e 100644 --- a/vendor/github.com/containers/image/oci/layout/oci_src.go +++ b/vendor/github.com/containers/image/oci/layout/oci_src.go @@ -92,6 +92,11 @@ func (s *ociImageSource) GetManifest(ctx context.Context, instanceDigest *digest return m, mimeType, nil } +// HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently. +func (s *ociImageSource) HasThreadSafeGetBlob() bool { + return false +} + // GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown). // The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided. // May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location. diff --git a/vendor/github.com/containers/image/openshift/openshift.go b/vendor/github.com/containers/image/openshift/openshift.go index 0cce1e6c7..814c3eea1 100644 --- a/vendor/github.com/containers/image/openshift/openshift.go +++ b/vendor/github.com/containers/image/openshift/openshift.go @@ -211,6 +211,11 @@ func (s *openshiftImageSource) GetManifest(ctx context.Context, instanceDigest * return s.docker.GetManifest(ctx, instanceDigest) } +// HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently. +func (s *openshiftImageSource) HasThreadSafeGetBlob() bool { + return false +} + // GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown). // The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided. 
// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location. @@ -378,6 +383,11 @@ func (d *openshiftImageDestination) IgnoresEmbeddedDockerReference() bool { return d.docker.IgnoresEmbeddedDockerReference() } +// HasThreadSafePutBlob indicates whether PutBlob can be executed concurrently. +func (d *openshiftImageDestination) HasThreadSafePutBlob() bool { + return false +} + // PutBlob writes contents of stream and returns data representing the result (with all data filled in). // inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it. // inputInfo.Size is the expected length of stream, if known. diff --git a/vendor/github.com/containers/image/ostree/ostree_dest.go b/vendor/github.com/containers/image/ostree/ostree_dest.go index 064898948..d69f4fa33 100644 --- a/vendor/github.com/containers/image/ostree/ostree_dest.go +++ b/vendor/github.com/containers/image/ostree/ostree_dest.go @@ -4,7 +4,6 @@ package ostree import ( "bytes" - "compress/gzip" "context" "encoding/base64" "encoding/json" @@ -24,6 +23,7 @@ import ( "github.com/containers/image/manifest" "github.com/containers/image/types" "github.com/containers/storage/pkg/archive" + "github.com/klauspost/pgzip" "github.com/opencontainers/go-digest" selinux "github.com/opencontainers/selinux/go-selinux" "github.com/ostreedev/ostree-go/pkg/otbuiltin" @@ -132,6 +132,11 @@ func (d *ostreeImageDestination) IgnoresEmbeddedDockerReference() bool { return false // N/A, DockerReference() returns nil. } +// HasThreadSafePutBlob indicates whether PutBlob can be executed concurrently. +func (d *ostreeImageDestination) HasThreadSafePutBlob() bool { + return false +} + // PutBlob writes contents of stream and returns data representing the result. // inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it. // inputInfo.Size is the expected length of stream, if known. @@ -249,7 +254,7 @@ func (d *ostreeImageDestination) ostreeCommit(repo *otbuiltin.Repo, branch strin } func generateTarSplitMetadata(output *bytes.Buffer, file string) (digest.Digest, int64, error) { - mfz := gzip.NewWriter(output) + mfz := pgzip.NewWriter(output) defer mfz.Close() metaPacker := storage.NewJSONPacker(mfz) diff --git a/vendor/github.com/containers/image/ostree/ostree_src.go b/vendor/github.com/containers/image/ostree/ostree_src.go index e73cae198..df432c9f3 100644 --- a/vendor/github.com/containers/image/ostree/ostree_src.go +++ b/vendor/github.com/containers/image/ostree/ostree_src.go @@ -4,7 +4,6 @@ package ostree import ( "bytes" - "compress/gzip" "context" "encoding/base64" "fmt" @@ -17,6 +16,7 @@ import ( "github.com/containers/image/manifest" "github.com/containers/image/types" "github.com/containers/storage/pkg/ioutils" + "github.com/klauspost/pgzip" "github.com/opencontainers/go-digest" glib "github.com/ostreedev/ostree-go/pkg/glibobject" "github.com/pkg/errors" @@ -255,6 +255,11 @@ func (s *ostreeImageSource) readSingleFile(commit, path string) (io.ReadCloser, return getter.Get(path) } +// HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently. +func (s *ostreeImageSource) HasThreadSafeGetBlob() bool { + return false +} + // GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown). // The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided. 
// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location. @@ -304,7 +309,7 @@ func (s *ostreeImageSource) GetBlob(ctx context.Context, info types.BlobInfo, ca } mf := bytes.NewReader(tarsplit) - mfz, err := gzip.NewReader(mf) + mfz, err := pgzip.NewReader(mf) if err != nil { return nil, 0, err } diff --git a/vendor/github.com/containers/image/pkg/blobinfocache/default.go b/vendor/github.com/containers/image/pkg/blobinfocache/default.go index 6da9f2805..459ae5c06 100644 --- a/vendor/github.com/containers/image/pkg/blobinfocache/default.go +++ b/vendor/github.com/containers/image/pkg/blobinfocache/default.go @@ -54,7 +54,7 @@ func DefaultCache(sys *types.SystemContext) types.BlobInfoCache { } path := filepath.Join(dir, blobInfoCacheFilename) if err := os.MkdirAll(dir, 0700); err != nil { - logrus.Debugf("Error creating parent directories for %s, using a memory-only cache: %v", err) + logrus.Debugf("Error creating parent directories for %s, using a memory-only cache: %v", blobInfoCacheFilename, err) return NewMemoryCache() } diff --git a/vendor/github.com/containers/image/pkg/compression/compression.go b/vendor/github.com/containers/image/pkg/compression/compression.go index a39523e4f..aad2bfcf2 100644 --- a/vendor/github.com/containers/image/pkg/compression/compression.go +++ b/vendor/github.com/containers/image/pkg/compression/compression.go @@ -3,10 +3,10 @@ package compression import ( "bytes" "compress/bzip2" - "compress/gzip" "io" "io/ioutil" + "github.com/klauspost/pgzip" "github.com/pkg/errors" "github.com/sirupsen/logrus" "github.com/ulikunitz/xz" @@ -18,7 +18,7 @@ type DecompressorFunc func(io.Reader) (io.ReadCloser, error) // GzipDecompressor is a DecompressorFunc for the gzip compression algorithm. func GzipDecompressor(r io.Reader) (io.ReadCloser, error) { - return gzip.NewReader(r) + return pgzip.NewReader(r) } // Bzip2Decompressor is a DecompressorFunc for the bzip2 compression algorithm. diff --git a/vendor/github.com/containers/image/pkg/sysregistriesv2/system_registries_v2.go b/vendor/github.com/containers/image/pkg/sysregistriesv2/system_registries_v2.go index afc7312d1..9e3e9cfe1 100644 --- a/vendor/github.com/containers/image/pkg/sysregistriesv2/system_registries_v2.go +++ b/vendor/github.com/containers/image/pkg/sysregistriesv2/system_registries_v2.go @@ -318,8 +318,37 @@ func FindUnqualifiedSearchRegistries(ctx *types.SystemContext) ([]Registry, erro return unqualified, nil } -// FindRegistry returns the Registry with the longest prefix for ref. If no -// Registry prefixes the image, nil is returned. +// refMatchesPrefix returns true iff ref, +// which is a registry, repository namespace, repository or image reference (as formatted by +// reference.Domain(), reference.Named.Name() or reference.Reference.String() +// — note that this requires the name to start with an explicit hostname!), +// matches a Registry.Prefix value. +// (This is split from the caller primarily to make testing easier.) +func refMatchesPrefix(ref, prefix string) bool { + switch { + case len(ref) < len(prefix): + return false + case len(ref) == len(prefix): + return ref == prefix + case len(ref) > len(prefix): + if !strings.HasPrefix(ref, prefix) { + return false + } + c := ref[len(prefix)] + // This allows "example.com:5000" to match "example.com", + // which is unintended; that will get fixed eventually, DON'T RELY + // ON THE CURRENT BEHAVIOR. 
+ return c == ':' || c == '/' || c == '@' + default: + panic("Internal error: impossible comparison outcome") + } +} + +// FindRegistry returns the Registry with the longest prefix for ref, +// which is a registry, repository namespace repository or image reference (as formatted by +// reference.Domain(), reference.Named.Name() or reference.Reference.String() +// — note that this requires the name to start with an explicit hostname!). +// If no Registry prefixes the image, nil is returned. func FindRegistry(ctx *types.SystemContext, ref string) (*Registry, error) { registries, err := GetRegistries(ctx) if err != nil { @@ -329,7 +358,7 @@ func FindRegistry(ctx *types.SystemContext, ref string) (*Registry, error) { reg := Registry{} prefixLen := 0 for _, r := range registries { - if strings.HasPrefix(ref, r.Prefix+"/") || ref == r.Prefix { + if refMatchesPrefix(ref, r.Prefix) { length := len(r.Prefix) if length > prefixLen { reg = r diff --git a/vendor/github.com/containers/image/storage/storage_image.go b/vendor/github.com/containers/image/storage/storage_image.go index bd6813119..b53fbdf6e 100644 --- a/vendor/github.com/containers/image/storage/storage_image.go +++ b/vendor/github.com/containers/image/storage/storage_image.go @@ -11,6 +11,7 @@ import ( "io/ioutil" "os" "path/filepath" + "sync" "sync/atomic" "github.com/containers/image/image" @@ -47,6 +48,7 @@ type storageImageSource struct { image *storage.Image layerPosition map[digest.Digest]int // Where we are in reading a blob's layers cachedManifest []byte // A cached copy of the manifest, if already known, or nil + getBlobMutex sync.Mutex // Mutex to sync state for parallel GetBlob executions SignatureSizes []int `json:"signature-sizes,omitempty"` // List of sizes of each signature slice } @@ -56,6 +58,7 @@ type storageImageDestination struct { nextTempFileID int32 // A counter that we use for computing filenames to assign to blobs manifest []byte // Manifest contents, temporary signatures []byte // Signature contents, temporary + putBlobMutex sync.Mutex // Mutex to sync state for parallel PutBlob executions blobDiffIDs map[digest.Digest]digest.Digest // Mapping from layer blobsums to their corresponding DiffIDs fileSizes map[digest.Digest]int64 // Mapping from layer blobsums to their sizes filenames map[digest.Digest]string // Mapping from layer blobsums to names of files we used to hold them @@ -91,15 +94,20 @@ func newImageSource(imageRef storageReference) (*storageImageSource, error) { } // Reference returns the image reference that we used to find this image. -func (s storageImageSource) Reference() types.ImageReference { +func (s *storageImageSource) Reference() types.ImageReference { return s.imageRef } // Close cleans up any resources we tied up while reading the image. -func (s storageImageSource) Close() error { +func (s *storageImageSource) Close() error { return nil } +// HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently. +func (s *storageImageSource) HasThreadSafeGetBlob() bool { + return true +} + // GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown). // The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided. // May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location. @@ -137,8 +145,10 @@ func (s *storageImageSource) getBlobAndLayerID(info types.BlobInfo) (rc io.ReadC // Step through the list of matching layers. 
Tests may want to verify that if we have multiple layers // which claim to have the same contents, that we actually do have multiple layers, otherwise we could // just go ahead and use the first one every time. + s.getBlobMutex.Lock() i := s.layerPosition[info.Digest] s.layerPosition[info.Digest] = i + 1 + s.getBlobMutex.Unlock() if len(layers) > 0 { layer = layers[i%len(layers)] } @@ -300,7 +310,7 @@ func newImageDestination(imageRef storageReference) (*storageImageDestination, e // Reference returns the reference used to set up this destination. Note that this should directly correspond to user's intent, // e.g. it should use the public hostname instead of the result of resolving CNAMEs or following redirects. -func (s storageImageDestination) Reference() types.ImageReference { +func (s *storageImageDestination) Reference() types.ImageReference { return s.imageRef } @@ -309,7 +319,7 @@ func (s *storageImageDestination) Close() error { return os.RemoveAll(s.directory) } -func (s storageImageDestination) DesiredLayerCompression() types.LayerCompression { +func (s *storageImageDestination) DesiredLayerCompression() types.LayerCompression { // We ultimately have to decompress layers to populate trees on disk, // so callers shouldn't bother compressing them before handing them to // us, if they're not already compressed. @@ -320,6 +330,11 @@ func (s *storageImageDestination) computeNextBlobCacheFile() string { return filepath.Join(s.directory, fmt.Sprintf("%d", atomic.AddInt32(&s.nextTempFileID, 1))) } +// HasThreadSafePutBlob indicates whether PutBlob can be executed concurrently. +func (s *storageImageDestination) HasThreadSafePutBlob() bool { + return true +} + // PutBlob writes contents of stream and returns data representing the result. // inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it. // inputInfo.Size is the expected length of stream, if known. @@ -370,9 +385,11 @@ func (s *storageImageDestination) PutBlob(ctx context.Context, stream io.Reader, return errorBlobInfo, ErrBlobSizeMismatch } // Record information about the blob. + s.putBlobMutex.Lock() s.blobDiffIDs[hasher.Digest()] = diffID.Digest() s.fileSizes[hasher.Digest()] = counter.Count s.filenames[hasher.Digest()] = filename + s.putBlobMutex.Unlock() blobDigest := blobinfo.Digest if blobDigest.Validate() != nil { blobDigest = hasher.Digest() @@ -398,6 +415,9 @@ func (s *storageImageDestination) PutBlob(ctx context.Context, stream io.Reader, // If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure. // May use and/or update cache. 
func (s *storageImageDestination) TryReusingBlob(ctx context.Context, blobinfo types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) { + // lock the entire method as it executes fairly quickly + s.putBlobMutex.Lock() + defer s.putBlobMutex.Unlock() if blobinfo.Digest == "" { return false, types.BlobInfo{}, errors.Errorf(`Can not check for a blob with unknown digest`) } diff --git a/vendor/github.com/containers/image/tarball/tarball_src.go b/vendor/github.com/containers/image/tarball/tarball_src.go index ee963b8d8..76e3e755f 100644 --- a/vendor/github.com/containers/image/tarball/tarball_src.go +++ b/vendor/github.com/containers/image/tarball/tarball_src.go @@ -2,7 +2,6 @@ package tarball import ( "bytes" - "compress/gzip" "context" "encoding/json" "fmt" @@ -14,7 +13,7 @@ import ( "time" "github.com/containers/image/types" - + "github.com/klauspost/pgzip" digest "github.com/opencontainers/go-digest" imgspecs "github.com/opencontainers/image-spec/specs-go" imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" @@ -77,7 +76,7 @@ func (r *tarballReference) NewImageSource(ctx context.Context, sys *types.System // Set up to digest the file after we maybe decompress it. diffIDdigester := digest.Canonical.Digester() - uncompressed, err := gzip.NewReader(reader) + uncompressed, err := pgzip.NewReader(reader) if err == nil { // It is compressed, so the diffID is the digest of the uncompressed version reader = io.TeeReader(uncompressed, diffIDdigester.Hash()) @@ -207,6 +206,11 @@ func (is *tarballImageSource) Close() error { return nil } +// HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently. +func (is *tarballImageSource) HasThreadSafeGetBlob() bool { + return false +} + // GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown). // The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided. // May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location. diff --git a/vendor/github.com/containers/image/types/types.go b/vendor/github.com/containers/image/types/types.go index dda332776..9fdab2314 100644 --- a/vendor/github.com/containers/image/types/types.go +++ b/vendor/github.com/containers/image/types/types.go @@ -198,6 +198,8 @@ type ImageSource interface { // The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided. // May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location. GetBlob(context.Context, BlobInfo, BlobInfoCache) (io.ReadCloser, int64, error) + // HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently. + HasThreadSafeGetBlob() bool // GetSignatures returns the image's signatures. It may use a remote (= slow) service. // If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve signatures for // (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list @@ -264,6 +266,8 @@ type ImageDestination interface { // to any other readers for download using the supplied digest. // If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far. 
PutBlob(ctx context.Context, stream io.Reader, inputInfo BlobInfo, cache BlobInfoCache, isConfig bool) (BlobInfo, error) + // HasThreadSafePutBlob indicates whether PutBlob can be executed concurrently. + HasThreadSafePutBlob() bool // TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination // (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree). // info.Digest must not be empty. diff --git a/vendor/github.com/containers/image/vendor.conf b/vendor/github.com/containers/image/vendor.conf index 88537981a..bbbb1a458 100644 --- a/vendor/github.com/containers/image/vendor.conf +++ b/vendor/github.com/containers/image/vendor.conf @@ -16,7 +16,7 @@ github.com/imdario/mergo 50d4dbd4eb0e84778abe37cefef140271d96fade github.com/mattn/go-runewidth 14207d285c6c197daabb5c9793d63e7af9ab2d50 github.com/mistifyio/go-zfs c0224de804d438efd11ea6e52ada8014537d6062 github.com/mtrmac/gpgme b2432428689ca58c2b8e8dea9449d3295cf96fc9 -github.com/opencontainers/go-digest aa2ec055abd10d26d539eb630a92241b781ce4bc +github.com/opencontainers/go-digest c9281466c8b2f606084ac71339773efd177436e7 github.com/opencontainers/image-spec v1.0.0 github.com/opencontainers/runc 6b1d0e76f239ffb435445e5ae316d2676c07c6e3 github.com/pborman/uuid 1b00554d822231195d1babd97ff4a781231955c9 @@ -26,8 +26,9 @@ github.com/stretchr/testify 4d4bfba8f1d1027c4fdbe371823030df51419987 github.com/vbatts/tar-split v0.10.2 golang.org/x/crypto 453249f01cfeb54c3d549ddb75ff152ca243f9d8 golang.org/x/net 6b27048ae5e6ad1ef927e72e437531493de612fe +golang.org/x/sync 42b317875d0fa942474b76e1b46a6060d720ae6e golang.org/x/sys 43e60d72a8e2bd92ee98319ba9a384a0e9837c08 -gopkg.in/cheggaaa/pb.v1 d7e6ca3010b6f084d8056847f55d7f572f180678 +gopkg.in/cheggaaa/pb.v1 v1.0.27 gopkg.in/yaml.v2 a3f3340b5840cee44f372bddb5880fcbc419b46a k8s.io/client-go bcde30fb7eaed76fd98a36b4120321b94995ffb6 github.com/xeipuuv/gojsonschema master @@ -36,7 +37,7 @@ github.com/xeipuuv/gojsonpointer master github.com/tchap/go-patricia v2.2.6 github.com/opencontainers/selinux 077c8b6d1c18456fb7c792bc0de52295a0d1900e github.com/BurntSushi/toml b26d9c308763d68093482582cea63d69be07a0f0 -github.com/ostreedev/ostree-go aeb02c6b6aa2889db3ef62f7855650755befd460 +github.com/ostreedev/ostree-go 56f3a639dbc0f2f5051c6d52dade28a882ba78ce github.com/gogo/protobuf fcdc5011193ff531a548e9b0301828d5a5b97fd8 github.com/pquerna/ffjson master github.com/syndtr/gocapability master @@ -44,3 +45,6 @@ github.com/Microsoft/go-winio ab35fc04b6365e8fcb18e6e9e41ea4a02b10b175 github.com/Microsoft/hcsshim eca7177590cdcbd25bbc5df27e3b693a54b53a6a github.com/ulikunitz/xz v0.5.4 github.com/boltdb/bolt master +github.com/klauspost/pgzip v1.2.1 +github.com/klauspost/compress v1.4.1 +github.com/klauspost/cpuid v1.2.0 diff --git a/vendor/github.com/opencontainers/go-digest/LICENSE b/vendor/github.com/opencontainers/go-digest/LICENSE new file mode 100644 index 000000000..0ea3ff81e --- /dev/null +++ b/vendor/github.com/opencontainers/go-digest/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + https://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. 
+ + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2016 Docker, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/opencontainers/go-digest/LICENSE.code b/vendor/github.com/opencontainers/go-digest/LICENSE.code deleted file mode 100644 index 0ea3ff81e..000000000 --- a/vendor/github.com/opencontainers/go-digest/LICENSE.code +++ /dev/null @@ -1,191 +0,0 @@ - - Apache License - Version 2.0, January 2004 - https://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. 
- - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. 
This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - Copyright 2016 Docker, Inc. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - https://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/opencontainers/go-digest/README.md b/vendor/github.com/opencontainers/go-digest/README.md index 0f5a04092..25aac3470 100644 --- a/vendor/github.com/opencontainers/go-digest/README.md +++ b/vendor/github.com/opencontainers/go-digest/README.md @@ -101,4 +101,4 @@ the various OCI projects). # Copyright and license -Copyright © 2016 Docker, Inc. All rights reserved, except as follows. Code is released under the [Apache 2.0 license](LICENSE.code). 
This `README.md` file and the [`CONTRIBUTING.md`](CONTRIBUTING.md) file are licensed under the Creative Commons Attribution 4.0 International License under the terms and conditions set forth in the file [`LICENSE.docs`](LICENSE.docs). You may obtain a duplicate copy of the same license, titled CC BY-SA 4.0, at http://creativecommons.org/licenses/by-sa/4.0/. +Copyright © 2016 Docker, Inc. All rights reserved, except as follows. Code is released under the [Apache 2.0 license](LICENSE). This `README.md` file and the [`CONTRIBUTING.md`](CONTRIBUTING.md) file are licensed under the Creative Commons Attribution 4.0 International License under the terms and conditions set forth in the file [`LICENSE.docs`](LICENSE.docs). You may obtain a duplicate copy of the same license, titled CC BY-SA 4.0, at http://creativecommons.org/licenses/by-sa/4.0/. diff --git a/vendor/github.com/opencontainers/go-digest/algorithm.go b/vendor/github.com/opencontainers/go-digest/algorithm.go index bdff42d92..8813bd26f 100644 --- a/vendor/github.com/opencontainers/go-digest/algorithm.go +++ b/vendor/github.com/opencontainers/go-digest/algorithm.go @@ -19,6 +19,7 @@ import ( "fmt" "hash" "io" + "regexp" ) // Algorithm identifies and implementation of a digester by an identifier. @@ -28,9 +29,9 @@ type Algorithm string // supported digest types const ( - SHA256 Algorithm = "sha256" // sha256 with hex encoding - SHA384 Algorithm = "sha384" // sha384 with hex encoding - SHA512 Algorithm = "sha512" // sha512 with hex encoding + SHA256 Algorithm = "sha256" // sha256 with hex encoding (lower case only) + SHA384 Algorithm = "sha384" // sha384 with hex encoding (lower case only) + SHA512 Algorithm = "sha512" // sha512 with hex encoding (lower case only) // Canonical is the primary digest algorithm used with the distribution // project. Other digests may be used but this one is the primary storage @@ -50,6 +51,14 @@ var ( SHA384: crypto.SHA384, SHA512: crypto.SHA512, } + + // anchoredEncodedRegexps contains anchored regular expressions for hex-encoded digests. + // Note that /A-F/ disallowed. + anchoredEncodedRegexps = map[Algorithm]*regexp.Regexp{ + SHA256: regexp.MustCompile(`^[a-f0-9]{64}$`), + SHA384: regexp.MustCompile(`^[a-f0-9]{96}$`), + SHA512: regexp.MustCompile(`^[a-f0-9]{128}$`), + } ) // Available returns true if the digest type is available for use. If this @@ -125,6 +134,14 @@ func (a Algorithm) Hash() hash.Hash { return algorithms[a].New() } +// Encode encodes the raw bytes of a digest, typically from a hash.Hash, into +// the encoded portion of the digest. +func (a Algorithm) Encode(d []byte) string { + // TODO(stevvooe): Currently, all algorithms use a hex encoding. When we + // add support for back registration, we can modify this accordingly. + return fmt.Sprintf("%x", d) +} + // FromReader returns the digest of the reader using the algorithm. 
func (a Algorithm) FromReader(rd io.Reader) (Digest, error) { digester := a.Digester() @@ -156,3 +173,20 @@ func (a Algorithm) FromBytes(p []byte) Digest { func (a Algorithm) FromString(s string) Digest { return a.FromBytes([]byte(s)) } + +// Validate validates the encoded portion string +func (a Algorithm) Validate(encoded string) error { + r, ok := anchoredEncodedRegexps[a] + if !ok { + return ErrDigestUnsupported + } + // Digests much always be hex-encoded, ensuring that their hex portion will + // always be size*2 + if a.Size()*2 != len(encoded) { + return ErrDigestInvalidLength + } + if r.MatchString(encoded) { + return nil + } + return ErrDigestInvalidFormat +} diff --git a/vendor/github.com/opencontainers/go-digest/digest.go b/vendor/github.com/opencontainers/go-digest/digest.go index 69e1d2b54..ad398cba2 100644 --- a/vendor/github.com/opencontainers/go-digest/digest.go +++ b/vendor/github.com/opencontainers/go-digest/digest.go @@ -45,16 +45,21 @@ func NewDigest(alg Algorithm, h hash.Hash) Digest { // functions. This is also useful for rebuilding digests from binary // serializations. func NewDigestFromBytes(alg Algorithm, p []byte) Digest { - return Digest(fmt.Sprintf("%s:%x", alg, p)) + return NewDigestFromEncoded(alg, alg.Encode(p)) } -// NewDigestFromHex returns a Digest from alg and a the hex encoded digest. +// NewDigestFromHex is deprecated. Please use NewDigestFromEncoded. func NewDigestFromHex(alg, hex string) Digest { - return Digest(fmt.Sprintf("%s:%s", alg, hex)) + return NewDigestFromEncoded(Algorithm(alg), hex) +} + +// NewDigestFromEncoded returns a Digest from alg and the encoded digest. +func NewDigestFromEncoded(alg Algorithm, encoded string) Digest { + return Digest(fmt.Sprintf("%s:%s", alg, encoded)) } // DigestRegexp matches valid digest types. -var DigestRegexp = regexp.MustCompile(`[a-zA-Z0-9-_+.]+:[a-fA-F0-9]+`) +var DigestRegexp = regexp.MustCompile(`[a-z0-9]+(?:[.+_-][a-z0-9]+)*:[a-zA-Z0-9=_-]+`) // DigestRegexpAnchored matches valid digest types, anchored to the start and end of the match. var DigestRegexpAnchored = regexp.MustCompile(`^` + DigestRegexp.String() + `$`) @@ -96,26 +101,18 @@ func FromString(s string) Digest { // error if not. func (d Digest) Validate() error { s := string(d) - i := strings.Index(s, ":") - - // validate i then run through regexp - if i < 0 || i+1 == len(s) || !DigestRegexpAnchored.MatchString(s) { + if i <= 0 || i+1 == len(s) { return ErrDigestInvalidFormat } - - algorithm := Algorithm(s[:i]) + algorithm, encoded := Algorithm(s[:i]), s[i+1:] if !algorithm.Available() { + if !DigestRegexpAnchored.MatchString(s) { + return ErrDigestInvalidFormat + } return ErrDigestUnsupported } - - // Digests much always be hex-encoded, ensuring that their hex portion will - // always be size*2 - if algorithm.Size()*2 != len(s[i+1:]) { - return ErrDigestInvalidLength - } - - return nil + return algorithm.Validate(encoded) } // Algorithm returns the algorithm portion of the digest. This will panic if @@ -133,12 +130,17 @@ func (d Digest) Verifier() Verifier { } } -// Hex returns the hex digest portion of the digest. This will panic if the +// Encoded returns the encoded portion of the digest. This will panic if the // underlying digest is not in a valid format. -func (d Digest) Hex() string { +func (d Digest) Encoded() string { return string(d[d.sepIndex()+1:]) } +// Hex is deprecated. Please use Digest.Encoded. 
+func (d Digest) Hex() string { + return d.Encoded() +} + func (d Digest) String() string { return string(d) } diff --git a/vendor/golang.org/x/sync/LICENSE b/vendor/golang.org/x/sync/LICENSE new file mode 100644 index 000000000..6a66aea5e --- /dev/null +++ b/vendor/golang.org/x/sync/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/golang.org/x/sync/PATENTS b/vendor/golang.org/x/sync/PATENTS new file mode 100644 index 000000000..733099041 --- /dev/null +++ b/vendor/golang.org/x/sync/PATENTS @@ -0,0 +1,22 @@ +Additional IP Rights Grant (Patents) + +"This implementation" means the copyrightable works distributed by +Google as part of the Go project. + +Google hereby grants to You a perpetual, worldwide, non-exclusive, +no-charge, royalty-free, irrevocable (except as stated in this section) +patent license to make, have made, use, offer to sell, sell, import, +transfer and otherwise run, modify and propagate the contents of this +implementation of Go, where such license applies only to those patent +claims, both currently owned or controlled by Google and acquired in +the future, licensable by Google that are necessarily infringed by this +implementation of Go. This grant does not include claims that would be +infringed only as a consequence of further modification of this +implementation. If you or your agent or exclusive licensee institute or +order or agree to the institution of patent litigation against any +entity (including a cross-claim or counterclaim in a lawsuit) alleging +that this implementation of Go or any code incorporated within this +implementation of Go constitutes direct or contributory patent +infringement, or inducement of patent infringement, then any patent +rights granted to you under this License for this implementation of Go +shall terminate as of the date such litigation is filed. 
diff --git a/vendor/golang.org/x/sync/README.md b/vendor/golang.org/x/sync/README.md new file mode 100644 index 000000000..1f8436cc9 --- /dev/null +++ b/vendor/golang.org/x/sync/README.md @@ -0,0 +1,18 @@ +# Go Sync + +This repository provides Go concurrency primitives in addition to the +ones provided by the language and "sync" and "sync/atomic" packages. + +## Download/Install + +The easiest way to install is to run `go get -u golang.org/x/sync`. You can +also manually git clone the repository to `$GOPATH/src/golang.org/x/sync`. + +## Report Issues / Send Patches + +This repository uses Gerrit for code changes. To learn how to submit changes to +this repository, see https://golang.org/doc/contribute.html. + +The main issue tracker for the sync repository is located at +https://github.com/golang/go/issues. Prefix your issue with "x/sync:" in the +subject line, so it is easy to find. diff --git a/vendor/golang.org/x/sync/semaphore/semaphore.go b/vendor/golang.org/x/sync/semaphore/semaphore.go new file mode 100644 index 000000000..ac53e733e --- /dev/null +++ b/vendor/golang.org/x/sync/semaphore/semaphore.go @@ -0,0 +1,127 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package semaphore provides a weighted semaphore implementation. +package semaphore // import "golang.org/x/sync/semaphore" + +import ( + "container/list" + "context" + "sync" +) + +type waiter struct { + n int64 + ready chan<- struct{} // Closed when semaphore acquired. +} + +// NewWeighted creates a new weighted semaphore with the given +// maximum combined weight for concurrent access. +func NewWeighted(n int64) *Weighted { + w := &Weighted{size: n} + return w +} + +// Weighted provides a way to bound concurrent access to a resource. +// The callers can request access with a given weight. +type Weighted struct { + size int64 + cur int64 + mu sync.Mutex + waiters list.List +} + +// Acquire acquires the semaphore with a weight of n, blocking until resources +// are available or ctx is done. On success, returns nil. On failure, returns +// ctx.Err() and leaves the semaphore unchanged. +// +// If ctx is already done, Acquire may still succeed without blocking. +func (s *Weighted) Acquire(ctx context.Context, n int64) error { + s.mu.Lock() + if s.size-s.cur >= n && s.waiters.Len() == 0 { + s.cur += n + s.mu.Unlock() + return nil + } + + if n > s.size { + // Don't make other Acquire calls block on one that's doomed to fail. + s.mu.Unlock() + <-ctx.Done() + return ctx.Err() + } + + ready := make(chan struct{}) + w := waiter{n: n, ready: ready} + elem := s.waiters.PushBack(w) + s.mu.Unlock() + + select { + case <-ctx.Done(): + err := ctx.Err() + s.mu.Lock() + select { + case <-ready: + // Acquired the semaphore after we were canceled. Rather than trying to + // fix up the queue, just pretend we didn't notice the cancelation. + err = nil + default: + s.waiters.Remove(elem) + } + s.mu.Unlock() + return err + + case <-ready: + return nil + } +} + +// TryAcquire acquires the semaphore with a weight of n without blocking. +// On success, returns true. On failure, returns false and leaves the semaphore unchanged. +func (s *Weighted) TryAcquire(n int64) bool { + s.mu.Lock() + success := s.size-s.cur >= n && s.waiters.Len() == 0 + if success { + s.cur += n + } + s.mu.Unlock() + return success +} + +// Release releases the semaphore with a weight of n. 
+func (s *Weighted) Release(n int64) { + s.mu.Lock() + s.cur -= n + if s.cur < 0 { + s.mu.Unlock() + panic("semaphore: bad release") + } + for { + next := s.waiters.Front() + if next == nil { + break // No more waiters blocked. + } + + w := next.Value.(waiter) + if s.size-s.cur < w.n { + // Not enough tokens for the next waiter. We could keep going (to try to + // find a waiter with a smaller request), but under load that could cause + // starvation for large requests; instead, we leave all remaining waiters + // blocked. + // + // Consider a semaphore used as a read-write lock, with N tokens, N + // readers, and one writer. Each reader can Acquire(1) to obtain a read + // lock. The writer can Acquire(N) to obtain a write lock, excluding all + // of the readers. If we allow the readers to jump ahead in the queue, + // the writer will starve — there is always one token available for every + // reader. + break + } + + s.cur += w.n + s.waiters.Remove(next) + close(w.ready) + } + s.mu.Unlock() +} diff --git a/vendor/gopkg.in/cheggaaa/pb.v1/README.md b/vendor/gopkg.in/cheggaaa/pb.v1/README.md index 31babb305..1295df70e 100644 --- a/vendor/gopkg.in/cheggaaa/pb.v1/README.md +++ b/vendor/gopkg.in/cheggaaa/pb.v1/README.md @@ -1,7 +1,8 @@ # Terminal progress bar for Go -Simple progress bar for console programs. - +Simple progress bar for console programs. + +Please check the new version https://github.com/cheggaaa/pb/tree/v2 (currently, it's beta) ## Installation @@ -170,7 +171,7 @@ The result will be as follows: ``` $ go run example/multiple.go -First 141 / 1000 [===============>---------------------------------------] 14.10 % 44s -Second 139 / 1000 [==============>---------------------------------------] 13.90 % 44s -Third 152 / 1000 [================>--------------------------------------] 15.20 % 40s +First 34 / 200 [=========>---------------------------------------------] 17.00% 00m08s +Second 42 / 200 [===========>------------------------------------------] 21.00% 00m06s +Third 36 / 200 [=========>---------------------------------------------] 18.00% 00m08s ``` diff --git a/vendor/gopkg.in/cheggaaa/pb.v1/format.go b/vendor/gopkg.in/cheggaaa/pb.v1/format.go index d5aeff793..8bb8a7a1d 100644 --- a/vendor/gopkg.in/cheggaaa/pb.v1/format.go +++ b/vendor/gopkg.in/cheggaaa/pb.v1/format.go @@ -2,7 +2,6 @@ package pb import ( "fmt" - "strings" "time" ) @@ -11,12 +10,26 @@ type Units int const ( // U_NO are default units, they represent a simple value and are not formatted at all. U_NO Units = iota - // U_BYTES units are formatted in a human readable way (b, Bb, Mb, ...) + // U_BYTES units are formatted in a human readable way (B, KiB, MiB, ...) U_BYTES + // U_BYTES_DEC units are like U_BYTES, but base 10 (B, KB, MB, ...) 
+ U_BYTES_DEC // U_DURATION units are formatted in a human readable way (3h14m15s) U_DURATION ) +const ( + KiB = 1024 + MiB = 1048576 + GiB = 1073741824 + TiB = 1099511627776 + + KB = 1e3 + MB = 1e6 + GB = 1e9 + TB = 1e12 +) + func Format(i int64) *formatter { return &formatter{n: i} } @@ -28,11 +41,6 @@ type formatter struct { perSec bool } -func (f *formatter) Value(n int64) *formatter { - f.n = n - return f -} - func (f *formatter) To(unit Units) *formatter { f.unit = unit return f @@ -52,13 +60,10 @@ func (f *formatter) String() (out string) { switch f.unit { case U_BYTES: out = formatBytes(f.n) + case U_BYTES_DEC: + out = formatBytesDec(f.n) case U_DURATION: - d := time.Duration(f.n) - if d > time.Hour*24 { - out = fmt.Sprintf("%dd", d/24/time.Hour) - d -= (d / time.Hour / 24) * (time.Hour * 24) - } - out = fmt.Sprintf("%s%v", out, d) + out = formatDuration(f.n) default: out = fmt.Sprintf(fmt.Sprintf("%%%dd", f.width), f.n) } @@ -68,20 +73,53 @@ func (f *formatter) String() (out string) { return } -// Convert bytes to human readable string. Like a 2 MB, 64.2 KB, 52 B +// Convert bytes to human readable string. Like 2 MiB, 64.2 KiB, 52 B func formatBytes(i int64) (result string) { switch { - case i > (1024 * 1024 * 1024 * 1024): - result = fmt.Sprintf("%.02f TB", float64(i)/1024/1024/1024/1024) - case i > (1024 * 1024 * 1024): - result = fmt.Sprintf("%.02f GB", float64(i)/1024/1024/1024) - case i > (1024 * 1024): - result = fmt.Sprintf("%.02f MB", float64(i)/1024/1024) - case i > 1024: - result = fmt.Sprintf("%.02f KB", float64(i)/1024) + case i >= TiB: + result = fmt.Sprintf("%.02f TiB", float64(i)/TiB) + case i >= GiB: + result = fmt.Sprintf("%.02f GiB", float64(i)/GiB) + case i >= MiB: + result = fmt.Sprintf("%.02f MiB", float64(i)/MiB) + case i >= KiB: + result = fmt.Sprintf("%.02f KiB", float64(i)/KiB) default: result = fmt.Sprintf("%d B", i) } - result = strings.Trim(result, " ") + return +} + +// Convert bytes to base-10 human readable string. 
Like 2 MB, 64.2 KB, 52 B +func formatBytesDec(i int64) (result string) { + switch { + case i >= TB: + result = fmt.Sprintf("%.02f TB", float64(i)/TB) + case i >= GB: + result = fmt.Sprintf("%.02f GB", float64(i)/GB) + case i >= MB: + result = fmt.Sprintf("%.02f MB", float64(i)/MB) + case i >= KB: + result = fmt.Sprintf("%.02f KB", float64(i)/KB) + default: + result = fmt.Sprintf("%d B", i) + } + return +} + +func formatDuration(n int64) (result string) { + d := time.Duration(n) + if d > time.Hour*24 { + result = fmt.Sprintf("%dd", d/24/time.Hour) + d -= (d / time.Hour / 24) * (time.Hour * 24) + } + if d > time.Hour { + result = fmt.Sprintf("%s%dh", result, d/time.Hour) + d -= d / time.Hour * time.Hour + } + m := d / time.Minute + d -= m * time.Minute + s := d / time.Second + result = fmt.Sprintf("%s%02dm%02ds", result, m, s) return } diff --git a/vendor/gopkg.in/cheggaaa/pb.v1/pb.go b/vendor/gopkg.in/cheggaaa/pb.v1/pb.go index f1f15bff3..e03291b89 100644 --- a/vendor/gopkg.in/cheggaaa/pb.v1/pb.go +++ b/vendor/gopkg.in/cheggaaa/pb.v1/pb.go @@ -13,7 +13,7 @@ import ( ) // Current version -const Version = "1.0.6" +const Version = "1.0.27" const ( // Default refresh rate - 200ms @@ -37,18 +37,17 @@ func New(total int) *ProgressBar { // Create new progress bar object using int64 as total func New64(total int64) *ProgressBar { pb := &ProgressBar{ - Total: total, - RefreshRate: DEFAULT_REFRESH_RATE, - ShowPercent: true, - ShowCounters: true, - ShowBar: true, - ShowTimeLeft: true, - ShowFinalTime: true, - Units: U_NO, - ManualUpdate: false, - finish: make(chan struct{}), - currentValue: -1, - mu: new(sync.Mutex), + Total: total, + RefreshRate: DEFAULT_REFRESH_RATE, + ShowPercent: true, + ShowCounters: true, + ShowBar: true, + ShowTimeLeft: true, + ShowElapsedTime: false, + ShowFinalTime: true, + Units: U_NO, + ManualUpdate: false, + finish: make(chan struct{}), } return pb.Format(FORMAT) } @@ -67,13 +66,14 @@ func StartNew(total int) *ProgressBar { type Callback func(out string) type ProgressBar struct { - current int64 // current must be first member of struct (https://code.google.com/p/go/issues/detail?id=5278) + current int64 // current must be first member of struct (https://code.google.com/p/go/issues/detail?id=5278) + previous int64 Total int64 RefreshRate time.Duration ShowPercent, ShowCounters bool ShowSpeed, ShowTimeLeft, ShowBar bool - ShowFinalTime bool + ShowFinalTime, ShowElapsedTime bool Output io.Writer Callback Callback NotPrint bool @@ -91,13 +91,14 @@ type ProgressBar struct { finish chan struct{} isFinish bool - startTime time.Time - startValue int64 - currentValue int64 + startTime time.Time + startValue int64 + + changeTime time.Time prefix, postfix string - mu *sync.Mutex + mu sync.Mutex lastPrint string BarStart string @@ -112,8 +113,8 @@ type ProgressBar struct { // Start print func (pb *ProgressBar) Start() *ProgressBar { pb.startTime = time.Now() - pb.startValue = pb.current - if pb.Total == 0 { + pb.startValue = atomic.LoadInt64(&pb.current) + if atomic.LoadInt64(&pb.Total) == 0 { pb.ShowTimeLeft = false pb.ShowPercent = false pb.AutoStat = false @@ -158,12 +159,16 @@ func (pb *ProgressBar) Add64(add int64) int64 { // Set prefix string func (pb *ProgressBar) Prefix(prefix string) *ProgressBar { + pb.mu.Lock() + defer pb.mu.Unlock() pb.prefix = prefix return pb } // Set postfix string func (pb *ProgressBar) Postfix(postfix string) *ProgressBar { + pb.mu.Lock() + defer pb.mu.Unlock() pb.postfix = postfix return pb } @@ -173,7 +178,7 @@ func (pb *ProgressBar) 
Postfix(postfix string) *ProgressBar { // Example: bar.Format("[\x00=\x00>\x00-\x00]") // \x00 is the delimiter func (pb *ProgressBar) Format(format string) *ProgressBar { var formatEntries []string - if len(format) == 5 { + if utf8.RuneCountInString(format) == 5 { formatEntries = strings.Split(format, "") } else { formatEntries = strings.Split(format, "\x00") @@ -221,7 +226,9 @@ func (pb *ProgressBar) Finish() { //Protect multiple calls pb.finishOnce.Do(func() { close(pb.finish) - pb.write(atomic.LoadInt64(&pb.current)) + pb.write(atomic.LoadInt64(&pb.Total), atomic.LoadInt64(&pb.current)) + pb.mu.Lock() + defer pb.mu.Unlock() switch { case pb.Output != nil: fmt.Fprintln(pb.Output) @@ -232,6 +239,13 @@ func (pb *ProgressBar) Finish() { }) } +// IsFinished return boolean +func (pb *ProgressBar) IsFinished() bool { + pb.mu.Lock() + defer pb.mu.Unlock() + return pb.isFinish +} + // End print and write string 'str' func (pb *ProgressBar) FinishPrint(str string) { pb.Finish() @@ -262,16 +276,18 @@ func (pb *ProgressBar) NewProxyReader(r io.Reader) *Reader { return &Reader{r, pb} } -func (pb *ProgressBar) write(current int64) { +func (pb *ProgressBar) write(total, current int64) { + pb.mu.Lock() + defer pb.mu.Unlock() width := pb.GetWidth() - var percentBox, countersBox, timeLeftBox, speedBox, barBox, end, out string + var percentBox, countersBox, timeLeftBox, timeSpentBox, speedBox, barBox, end, out string // percents if pb.ShowPercent { var percent float64 - if pb.Total > 0 { - percent = float64(current) / (float64(pb.Total) / float64(100)) + if total > 0 { + percent = float64(current) / (float64(total) / float64(100)) } else { percent = float64(current) / float64(100) } @@ -281,17 +297,24 @@ func (pb *ProgressBar) write(current int64) { // counters if pb.ShowCounters { current := Format(current).To(pb.Units).Width(pb.UnitsWidth) - if pb.Total > 0 { - total := Format(pb.Total).To(pb.Units).Width(pb.UnitsWidth) - countersBox = fmt.Sprintf(" %s / %s ", current, total) + if total > 0 { + totalS := Format(total).To(pb.Units).Width(pb.UnitsWidth) + countersBox = fmt.Sprintf(" %s / %s ", current, totalS) } else { countersBox = fmt.Sprintf(" %s / ? 
", current) } } // time left - fromStart := time.Now().Sub(pb.startTime) currentFromStart := current - pb.startValue + fromStart := time.Now().Sub(pb.startTime) + lastChangeTime := pb.changeTime + fromChange := lastChangeTime.Sub(pb.startTime) + + if pb.ShowElapsedTime { + timeSpentBox = fmt.Sprintf(" %s ", (fromStart/time.Second)*time.Second) + } + select { case <-pb.finish: if pb.ShowFinalTime { @@ -301,17 +324,20 @@ func (pb *ProgressBar) write(current int64) { } default: if pb.ShowTimeLeft && currentFromStart > 0 { - perEntry := fromStart / time.Duration(currentFromStart) + perEntry := fromChange / time.Duration(currentFromStart) var left time.Duration - if pb.Total > 0 { - left = time.Duration(pb.Total-currentFromStart) * perEntry + if total > 0 { + left = time.Duration(total-currentFromStart) * perEntry + left -= time.Since(lastChangeTime) left = (left / time.Second) * time.Second } else { left = time.Duration(currentFromStart) * perEntry left = (left / time.Second) * time.Second } - timeLeft := Format(int64(left)).To(U_DURATION).String() - timeLeftBox = fmt.Sprintf(" %s", timeLeft) + if left > 0 { + timeLeft := Format(int64(left)).To(U_DURATION).String() + timeLeftBox = fmt.Sprintf(" %s", timeLeft) + } } } @@ -326,30 +352,38 @@ func (pb *ProgressBar) write(current int64) { speedBox = " " + Format(int64(speed)).To(pb.Units).Width(pb.UnitsWidth).PerSec().String() } - barWidth := escapeAwareRuneCountInString(countersBox + pb.BarStart + pb.BarEnd + percentBox + timeLeftBox + speedBox + pb.prefix + pb.postfix) + barWidth := escapeAwareRuneCountInString(countersBox + pb.BarStart + pb.BarEnd + percentBox + timeSpentBox + timeLeftBox + speedBox + pb.prefix + pb.postfix) // bar if pb.ShowBar { size := width - barWidth if size > 0 { - if pb.Total > 0 { - curCount := int(math.Ceil((float64(current) / float64(pb.Total)) * float64(size))) - emptCount := size - curCount + if total > 0 { + curSize := int(math.Ceil((float64(current) / float64(total)) * float64(size))) + emptySize := size - curSize barBox = pb.BarStart - if emptCount < 0 { - emptCount = 0 + if emptySize < 0 { + emptySize = 0 } - if curCount > size { - curCount = size + if curSize > size { + curSize = size } - if emptCount <= 0 { - barBox += strings.Repeat(pb.Current, curCount) - } else if curCount > 0 { - barBox += strings.Repeat(pb.Current, curCount-1) + pb.CurrentN + + cursorLen := escapeAwareRuneCountInString(pb.Current) + if emptySize <= 0 { + barBox += strings.Repeat(pb.Current, curSize/cursorLen) + } else if curSize > 0 { + cursorEndLen := escapeAwareRuneCountInString(pb.CurrentN) + cursorRepetitions := (curSize - cursorEndLen) / cursorLen + barBox += strings.Repeat(pb.Current, cursorRepetitions) + barBox += pb.CurrentN } - barBox += strings.Repeat(pb.Empty, emptCount) + pb.BarEnd + + emptyLen := escapeAwareRuneCountInString(pb.Empty) + barBox += strings.Repeat(pb.Empty, emptySize/emptyLen) + barBox += pb.BarEnd } else { - barBox = pb.BarStart pos := size - int(current)%int(size) + barBox = pb.BarStart if pos-1 > 0 { barBox += strings.Repeat(pb.Empty, pos-1) } @@ -363,17 +397,18 @@ func (pb *ProgressBar) write(current int64) { } // check len - out = pb.prefix + countersBox + barBox + percentBox + speedBox + timeLeftBox + pb.postfix - if escapeAwareRuneCountInString(out) < width { - end = strings.Repeat(" ", width-utf8.RuneCountInString(out)) + out = pb.prefix + timeSpentBox + countersBox + barBox + percentBox + speedBox + timeLeftBox + pb.postfix + + if cl := escapeAwareRuneCountInString(out); cl < width { + end = 
strings.Repeat(" ", width-cl) } // and print! - pb.mu.Lock() pb.lastPrint = out + end - pb.mu.Unlock() + isFinish := pb.isFinish + switch { - case pb.isFinish: + case isFinish: return case pb.Output != nil: fmt.Fprint(pb.Output, "\r"+out+end) @@ -406,24 +441,55 @@ func (pb *ProgressBar) GetWidth() int { // Write the current state of the progressbar func (pb *ProgressBar) Update() { c := atomic.LoadInt64(&pb.current) - if pb.AlwaysUpdate || c != pb.currentValue { - pb.write(c) - pb.currentValue = c + p := atomic.LoadInt64(&pb.previous) + t := atomic.LoadInt64(&pb.Total) + if p != c { + pb.mu.Lock() + pb.changeTime = time.Now() + pb.mu.Unlock() + atomic.StoreInt64(&pb.previous, c) } + pb.write(t, c) if pb.AutoStat { if c == 0 { pb.startTime = time.Now() pb.startValue = 0 - } else if c >= pb.Total && pb.isFinish != true { + } else if c >= t && pb.isFinish != true { pb.Finish() } } } +// String return the last bar print func (pb *ProgressBar) String() string { + pb.mu.Lock() + defer pb.mu.Unlock() return pb.lastPrint } +// SetTotal atomically sets new total count +func (pb *ProgressBar) SetTotal(total int) *ProgressBar { + return pb.SetTotal64(int64(total)) +} + +// SetTotal64 atomically sets new total count +func (pb *ProgressBar) SetTotal64(total int64) *ProgressBar { + atomic.StoreInt64(&pb.Total, total) + return pb +} + +// Reset bar and set new total count +// Does effect only on finished bar +func (pb *ProgressBar) Reset(total int) *ProgressBar { + pb.mu.Lock() + defer pb.mu.Unlock() + if pb.isFinish { + pb.SetTotal(total).Set(0) + atomic.StoreInt64(&pb.previous, 0) + } + return pb +} + // Internal loop for refreshing the progressbar func (pb *ProgressBar) refresher() { for { @@ -435,10 +501,3 @@ func (pb *ProgressBar) refresher() { } } } - -type window struct { - Row uint16 - Col uint16 - Xpixel uint16 - Ypixel uint16 -} diff --git a/vendor/gopkg.in/cheggaaa/pb.v1/pb_appengine.go b/vendor/gopkg.in/cheggaaa/pb.v1/pb_appengine.go index d85dbc3b2..17168f39a 100644 --- a/vendor/gopkg.in/cheggaaa/pb.v1/pb_appengine.go +++ b/vendor/gopkg.in/cheggaaa/pb.v1/pb_appengine.go @@ -1,4 +1,4 @@ -// +build appengine +// +build appengine js package pb diff --git a/vendor/gopkg.in/cheggaaa/pb.v1/pb_nix.go b/vendor/gopkg.in/cheggaaa/pb.v1/pb_nix.go deleted file mode 100644 index c06097b43..000000000 --- a/vendor/gopkg.in/cheggaaa/pb.v1/pb_nix.go +++ /dev/null @@ -1,8 +0,0 @@ -// +build linux darwin freebsd netbsd openbsd dragonfly -// +build !appengine - -package pb - -import "syscall" - -const sysIoctl = syscall.SYS_IOCTL diff --git a/vendor/gopkg.in/cheggaaa/pb.v1/pb_solaris.go b/vendor/gopkg.in/cheggaaa/pb.v1/pb_solaris.go deleted file mode 100644 index b7d461e17..000000000 --- a/vendor/gopkg.in/cheggaaa/pb.v1/pb_solaris.go +++ /dev/null @@ -1,6 +0,0 @@ -// +build solaris -// +build !appengine - -package pb - -const sysIoctl = 54 diff --git a/vendor/gopkg.in/cheggaaa/pb.v1/pb_win.go b/vendor/gopkg.in/cheggaaa/pb.v1/pb_win.go index 72f682835..9595e8236 100644 --- a/vendor/gopkg.in/cheggaaa/pb.v1/pb_win.go +++ b/vendor/gopkg.in/cheggaaa/pb.v1/pb_win.go @@ -102,7 +102,7 @@ var echoLockMutex sync.Mutex var oldState word -func lockEcho() (quit chan int, err error) { +func lockEcho() (shutdownCh chan struct{}, err error) { echoLockMutex.Lock() defer echoLockMutex.Unlock() if echoLocked { @@ -124,6 +124,8 @@ func lockEcho() (quit chan int, err error) { err = fmt.Errorf("Can't set terminal settings: %v", e) return } + + shutdownCh = make(chan struct{}) return } diff --git 
a/vendor/gopkg.in/cheggaaa/pb.v1/pb_x.go b/vendor/gopkg.in/cheggaaa/pb.v1/pb_x.go index 12e6fe6e2..af4251760 100644 --- a/vendor/gopkg.in/cheggaaa/pb.v1/pb_x.go +++ b/vendor/gopkg.in/cheggaaa/pb.v1/pb_x.go @@ -1,5 +1,5 @@ // +build linux darwin freebsd netbsd openbsd solaris dragonfly -// +build !appengine +// +build !appengine !js package pb @@ -8,101 +8,109 @@ import ( "fmt" "os" "os/signal" - "runtime" "sync" "syscall" - "unsafe" -) -const ( - TIOCGWINSZ = 0x5413 - TIOCGWINSZ_OSX = 1074295912 + "golang.org/x/sys/unix" ) -var tty *os.File - var ErrPoolWasStarted = errors.New("Bar pool was started") -var echoLocked bool -var echoLockMutex sync.Mutex +var ( + echoLockMutex sync.Mutex + origTermStatePtr *unix.Termios + tty *os.File + istty bool +) func init() { + echoLockMutex.Lock() + defer echoLockMutex.Unlock() + var err error tty, err = os.Open("/dev/tty") + istty = true if err != nil { tty = os.Stdin + istty = false } } // terminalWidth returns width of the terminal. func terminalWidth() (int, error) { - w := new(window) - tio := syscall.TIOCGWINSZ - if runtime.GOOS == "darwin" { - tio = TIOCGWINSZ_OSX + if !istty { + return 0, errors.New("Not Supported") } - res, _, err := syscall.Syscall(sysIoctl, - tty.Fd(), - uintptr(tio), - uintptr(unsafe.Pointer(w)), - ) - if int(res) == -1 { + echoLockMutex.Lock() + defer echoLockMutex.Unlock() + + fd := int(tty.Fd()) + + ws, err := unix.IoctlGetWinsize(fd, unix.TIOCGWINSZ) + if err != nil { return 0, err } - return int(w.Col), nil -} -var oldState syscall.Termios + return int(ws.Col), nil +} -func lockEcho() (quit chan int, err error) { +func lockEcho() (shutdownCh chan struct{}, err error) { echoLockMutex.Lock() defer echoLockMutex.Unlock() - if echoLocked { - err = ErrPoolWasStarted - return - } - echoLocked = true + if istty { + if origTermStatePtr != nil { + return shutdownCh, ErrPoolWasStarted + } - fd := tty.Fd() - if _, _, e := syscall.Syscall6(sysIoctl, fd, ioctlReadTermios, uintptr(unsafe.Pointer(&oldState)), 0, 0, 0); e != 0 { - err = fmt.Errorf("Can't get terminal settings: %v", e) - return - } + fd := int(tty.Fd()) + + origTermStatePtr, err = unix.IoctlGetTermios(fd, ioctlReadTermios) + if err != nil { + return nil, fmt.Errorf("Can't get terminal settings: %v", err) + } + + oldTermios := *origTermStatePtr + newTermios := oldTermios + newTermios.Lflag &^= syscall.ECHO + newTermios.Lflag |= syscall.ICANON | syscall.ISIG + newTermios.Iflag |= syscall.ICRNL + if err := unix.IoctlSetTermios(fd, ioctlWriteTermios, &newTermios); err != nil { + return nil, fmt.Errorf("Can't set terminal settings: %v", err) + } - newState := oldState - newState.Lflag &^= syscall.ECHO - newState.Lflag |= syscall.ICANON | syscall.ISIG - newState.Iflag |= syscall.ICRNL - if _, _, e := syscall.Syscall6(sysIoctl, fd, ioctlWriteTermios, uintptr(unsafe.Pointer(&newState)), 0, 0, 0); e != 0 { - err = fmt.Errorf("Can't set terminal settings: %v", e) - return } - quit = make(chan int, 1) - go catchTerminate(quit) + shutdownCh = make(chan struct{}) + go catchTerminate(shutdownCh) return } -func unlockEcho() (err error) { +func unlockEcho() error { echoLockMutex.Lock() defer echoLockMutex.Unlock() - if !echoLocked { - return - } - echoLocked = false - fd := tty.Fd() - if _, _, e := syscall.Syscall6(sysIoctl, fd, ioctlWriteTermios, uintptr(unsafe.Pointer(&oldState)), 0, 0, 0); e != 0 { - err = fmt.Errorf("Can't set terminal settings") + if istty { + if origTermStatePtr == nil { + return nil + } + + fd := int(tty.Fd()) + + if err := unix.IoctlSetTermios(fd, 
ioctlWriteTermios, origTermStatePtr); err != nil { + return fmt.Errorf("Can't set terminal settings: %v", err) + } + } - return + origTermStatePtr = nil + + return nil } // listen exit signals and restore terminal state -func catchTerminate(quit chan int) { +func catchTerminate(shutdownCh chan struct{}) { sig := make(chan os.Signal, 1) signal.Notify(sig, os.Interrupt, syscall.SIGQUIT, syscall.SIGTERM, syscall.SIGKILL) defer signal.Stop(sig) select { - case <-quit: + case <-shutdownCh: unlockEcho() case <-sig: unlockEcho() diff --git a/vendor/gopkg.in/cheggaaa/pb.v1/pool.go b/vendor/gopkg.in/cheggaaa/pb.v1/pool.go index 0b4a4afa8..392e7599c 100644 --- a/vendor/gopkg.in/cheggaaa/pb.v1/pool.go +++ b/vendor/gopkg.in/cheggaaa/pb.v1/pool.go @@ -3,6 +3,7 @@ package pb import ( + "io" "sync" "time" ) @@ -11,22 +12,36 @@ import ( // You need call pool.Stop() after work func StartPool(pbs ...*ProgressBar) (pool *Pool, err error) { pool = new(Pool) - if err = pool.start(); err != nil { + if err = pool.Start(); err != nil { return } pool.Add(pbs...) return } +// NewPool initialises a pool with progress bars, but +// doesn't start it. You need to call Start manually +func NewPool(pbs ...*ProgressBar) (pool *Pool) { + pool = new(Pool) + pool.Add(pbs...) + return +} + type Pool struct { - RefreshRate time.Duration - bars []*ProgressBar - quit chan int - finishOnce sync.Once + Output io.Writer + RefreshRate time.Duration + bars []*ProgressBar + lastBarsCount int + shutdownCh chan struct{} + workerCh chan struct{} + m sync.Mutex + finishOnce sync.Once } // Add progress bars. func (p *Pool) Add(pbs ...*ProgressBar) { + p.m.Lock() + defer p.m.Unlock() for _, bar := range pbs { bar.ManualUpdate = true bar.NotPrint = true @@ -35,30 +50,38 @@ func (p *Pool) Add(pbs ...*ProgressBar) { } } -func (p *Pool) start() (err error) { +func (p *Pool) Start() (err error) { p.RefreshRate = DefaultRefreshRate - quit, err := lockEcho() + p.shutdownCh, err = lockEcho() if err != nil { return } - p.quit = make(chan int) - go p.writer(quit) + p.workerCh = make(chan struct{}) + go p.writer() return } -func (p *Pool) writer(finish chan int) { +func (p *Pool) writer() { var first = true + defer func() { + if first == false { + p.print(false) + } else { + p.print(true) + p.print(false) + } + close(p.workerCh) + }() + for { select { case <-time.After(p.RefreshRate): if p.print(first) { p.print(false) - finish <- 1 return } first = false - case <-p.quit: - finish <- 1 + case <-p.shutdownCh: return } } @@ -66,11 +89,16 @@ func (p *Pool) writer(finish chan int) { // Restore terminal state and close pool func (p *Pool) Stop() error { - // Wait until one final refresh has passed. 
- time.Sleep(p.RefreshRate) - p.finishOnce.Do(func() { - close(p.quit) + if p.shutdownCh != nil { + close(p.shutdownCh) + } }) + + // Wait for the worker to complete + select { + case <-p.workerCh: + } + return unlockEcho() } diff --git a/vendor/gopkg.in/cheggaaa/pb.v1/pool_win.go b/vendor/gopkg.in/cheggaaa/pb.v1/pool_win.go index d7a5ace41..63598d378 100644 --- a/vendor/gopkg.in/cheggaaa/pb.v1/pool_win.go +++ b/vendor/gopkg.in/cheggaaa/pb.v1/pool_win.go @@ -8,13 +8,18 @@ import ( ) func (p *Pool) print(first bool) bool { + p.m.Lock() + defer p.m.Unlock() var out string if !first { coords, err := getCursorPos() if err != nil { log.Panic(err) } - coords.Y -= int16(len(p.bars)) + coords.Y -= int16(p.lastBarsCount) + if coords.Y < 0 { + coords.Y = 0 + } coords.X = 0 err = setCursorPos(coords) @@ -24,12 +29,17 @@ func (p *Pool) print(first bool) bool { } isFinished := true for _, bar := range p.bars { - if !bar.isFinish { + if !bar.IsFinished() { isFinished = false } bar.Update() out += fmt.Sprintf("\r%s\n", bar.String()) } - fmt.Print(out) + if p.Output != nil { + fmt.Fprint(p.Output, out) + } else { + fmt.Print(out) + } + p.lastBarsCount = len(p.bars) return isFinished } diff --git a/vendor/gopkg.in/cheggaaa/pb.v1/pool_x.go b/vendor/gopkg.in/cheggaaa/pb.v1/pool_x.go index d95b71d87..a8ae14d2f 100644 --- a/vendor/gopkg.in/cheggaaa/pb.v1/pool_x.go +++ b/vendor/gopkg.in/cheggaaa/pb.v1/pool_x.go @@ -5,18 +5,25 @@ package pb import "fmt" func (p *Pool) print(first bool) bool { + p.m.Lock() + defer p.m.Unlock() var out string if !first { - out = fmt.Sprintf("\033[%dA", len(p.bars)) + out = fmt.Sprintf("\033[%dA", p.lastBarsCount) } isFinished := true for _, bar := range p.bars { - if !bar.isFinish { + if !bar.IsFinished() { isFinished = false } bar.Update() out += fmt.Sprintf("\r%s\n", bar.String()) } - fmt.Print(out) + if p.Output != nil { + fmt.Fprint(p.Output, out) + } else { + fmt.Print(out) + } + p.lastBarsCount = len(p.bars) return isFinished } diff --git a/vendor/gopkg.in/cheggaaa/pb.v1/runecount.go b/vendor/gopkg.in/cheggaaa/pb.v1/runecount.go index d52edd365..c617c55ec 100644 --- a/vendor/gopkg.in/cheggaaa/pb.v1/runecount.go +++ b/vendor/gopkg.in/cheggaaa/pb.v1/runecount.go @@ -11,7 +11,7 @@ var ctrlFinder = regexp.MustCompile("\x1b\x5b[0-9]+\x6d") func escapeAwareRuneCountInString(s string) int { n := runewidth.StringWidth(s) for _, sm := range ctrlFinder.FindAllString(s, -1) { - n -= len(sm) + n -= runewidth.StringWidth(sm) } return n } diff --git a/vendor/gopkg.in/cheggaaa/pb.v1/termios_nix.go b/vendor/gopkg.in/cheggaaa/pb.v1/termios_nix.go deleted file mode 100644 index ebb3fe87c..000000000 --- a/vendor/gopkg.in/cheggaaa/pb.v1/termios_nix.go +++ /dev/null @@ -1,7 +0,0 @@ -// +build linux solaris -// +build !appengine - -package pb - -const ioctlReadTermios = 0x5401 // syscall.TCGETS -const ioctlWriteTermios = 0x5402 // syscall.TCSETS diff --git a/vendor/gopkg.in/cheggaaa/pb.v1/termios_sysv.go b/vendor/gopkg.in/cheggaaa/pb.v1/termios_sysv.go new file mode 100644 index 000000000..b10f61859 --- /dev/null +++ b/vendor/gopkg.in/cheggaaa/pb.v1/termios_sysv.go @@ -0,0 +1,13 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build linux solaris +// +build !appengine + +package pb + +import "golang.org/x/sys/unix" + +const ioctlReadTermios = unix.TCGETS +const ioctlWriteTermios = unix.TCSETS -- cgit v1.2.3-54-g00ecf