diff options
Diffstat (limited to 'vendor')
124 files changed, 1600 insertions, 735 deletions
diff --git a/vendor/github.com/containers/image/v5/copy/copy.go b/vendor/github.com/containers/image/v5/copy/copy.go index 8432dbe32..0b0fbc004 100644 --- a/vendor/github.com/containers/image/v5/copy/copy.go +++ b/vendor/github.com/containers/image/v5/copy/copy.go @@ -8,13 +8,13 @@ import ( "io/ioutil" "os" "reflect" - "runtime" "strings" "sync" "time" "github.com/containers/image/v5/docker/reference" "github.com/containers/image/v5/image" + "github.com/containers/image/v5/internal/pkg/platform" "github.com/containers/image/v5/manifest" "github.com/containers/image/v5/pkg/blobinfocache" "github.com/containers/image/v5/pkg/compression" @@ -356,11 +356,11 @@ func (c *copier) copyMultipleImages(ctx context.Context, policyContext *signatur if err != nil { return nil, "", errors.Wrapf(err, "Error reading manifest list") } - list, err := manifest.ListFromBlob(manifestList, manifestType) + originalList, err := manifest.ListFromBlob(manifestList, manifestType) if err != nil { return nil, "", errors.Wrapf(err, "Error parsing manifest list %q", string(manifestList)) } - originalList := list.Clone() + updatedList := originalList.Clone() // Read and/or clear the set of signatures for this list. 
var sigs [][]byte @@ -390,18 +390,18 @@ func (c *copier) copyMultipleImages(ctx context.Context, policyContext *signatur case imgspecv1.MediaTypeImageManifest: forceListMIMEType = imgspecv1.MediaTypeImageIndex } - selectedListType, err := c.determineListConversion(manifestType, c.dest.SupportedManifestMIMETypes(), forceListMIMEType) + selectedListType, otherManifestMIMETypeCandidates, err := c.determineListConversion(manifestType, c.dest.SupportedManifestMIMETypes(), forceListMIMEType) if err != nil { return nil, "", errors.Wrapf(err, "Error determining manifest list type to write to destination") } - if selectedListType != list.MIMEType() { + if selectedListType != originalList.MIMEType() { if !canModifyManifestList { return nil, "", errors.Errorf("Error: manifest list must be converted to type %q to be written to destination, but that would invalidate signatures", selectedListType) } } // Copy each image, or just the ones we want to copy, in turn. - instanceDigests := list.Instances() + instanceDigests := updatedList.Instances() imagesToCopy := len(instanceDigests) if options.ImageListSelection == CopySpecificImages { imagesToCopy = len(options.Instances) @@ -419,7 +419,7 @@ func (c *copier) copyMultipleImages(ctx context.Context, policyContext *signatur } } if skip { - update, err := list.Instance(instanceDigest) + update, err := updatedList.Instance(instanceDigest) if err != nil { return nil, "", err } @@ -447,42 +447,58 @@ func (c *copier) copyMultipleImages(ctx context.Context, policyContext *signatur } // Now reset the digest/size/types of the manifests in the list to account for any conversions that we made. - if err = list.UpdateInstances(updates); err != nil { + if err = updatedList.UpdateInstances(updates); err != nil { return nil, "", errors.Wrapf(err, "Error updating manifest list") } - // Perform the list conversion. 
- if selectedListType != list.MIMEType() { - list, err = list.ConvertToMIMEType(selectedListType) + // Iterate through supported list types, preferred format first. + c.Printf("Writing manifest list to image destination\n") + var errs []string + for _, thisListType := range append([]string{selectedListType}, otherManifestMIMETypeCandidates...) { + attemptedList := updatedList + + logrus.Debugf("Trying to use manifest list type %s…", thisListType) + + // Perform the list conversion, if we need one. + if thisListType != updatedList.MIMEType() { + attemptedList, err = updatedList.ConvertToMIMEType(thisListType) + if err != nil { + return nil, "", errors.Wrapf(err, "Error converting manifest list to list with MIME type %q", thisListType) + } + } + + // Check if the updates or a type conversion meaningfully changed the list of images + // by serializing them both so that we can compare them. + attemptedManifestList, err := attemptedList.Serialize() if err != nil { - return nil, "", errors.Wrapf(err, "Error converting manifest list to list with MIME type %q", selectedListType) + return nil, "", errors.Wrapf(err, "Error encoding updated manifest list (%q: %#v)", updatedList.MIMEType(), updatedList.Instances()) + } + originalManifestList, err := originalList.Serialize() + if err != nil { + return nil, "", errors.Wrapf(err, "Error encoding original manifest list for comparison (%q: %#v)", originalList.MIMEType(), originalList.Instances()) } - } - // Check if the updates or a type conversion meaningfully changed the list of images - // by serializing them both so that we can compare them. 
- updatedManifestList, err := list.Serialize() - if err != nil { - return nil, "", errors.Wrapf(err, "Error encoding updated manifest list (%q: %#v)", list.MIMEType(), list.Instances()) - } - originalManifestList, err := originalList.Serialize() - if err != nil { - return nil, "", errors.Wrapf(err, "Error encoding original manifest list for comparison (%q: %#v)", originalList.MIMEType(), originalList.Instances()) - } + // If we can't just use the original value, but we have to change it, flag an error. + if !bytes.Equal(attemptedManifestList, originalManifestList) { + if !canModifyManifestList { + return nil, "", errors.Errorf("Error: manifest list must be converted to type %q to be written to destination, but that would invalidate signatures", thisListType) + } + logrus.Debugf("Manifest list has been updated") + } - // If we can't just use the original value, but we have to change it, flag an error. - if !bytes.Equal(updatedManifestList, originalManifestList) { - if !canModifyManifestList { - return nil, "", errors.Errorf("Error: manifest list must be converted to type %q to be written to destination, but that would invalidate signatures", selectedListType) + // Save the manifest list. + err = c.dest.PutManifest(ctx, attemptedManifestList, nil) + if err != nil { + logrus.Debugf("Upload of manifest list type %s failed: %v", thisListType, err) + errs = append(errs, fmt.Sprintf("%s(%v)", thisListType, err)) + continue } - manifestList = updatedManifestList - logrus.Debugf("Manifest list has been updated") + errs = nil + manifestList = attemptedManifestList + break } - - // Save the manifest list. 
- c.Printf("Writing manifest list to image destination\n") - if err = c.dest.PutManifest(ctx, manifestList, nil); err != nil { - return nil, "", errors.Wrapf(err, "Error writing manifest list %q", string(manifestList)) + if errs != nil { + return nil, "", fmt.Errorf("Uploading manifest list failed, attempted the following formats: %s", strings.Join(errs, ", ")) } // Sign the manifest list. @@ -527,15 +543,6 @@ func (c *copier) copyOneImage(ctx context.Context, policyContext *signature.Poli return nil, "", "", errors.Wrapf(err, "Error initializing image from source %s", transports.ImageName(c.rawSource.Reference())) } - // TODO: Remove src.SupportsEncryption call and interface once copyUpdatedConfigAndManifest does not depend on source Image manifest type - // Currently, the way copyUpdatedConfigAndManifest updates the manifest is to apply updates to the source manifest and call PutManifest - // of the modified source manifest. The implication is that schemas like docker2 cannot be encrypted even though the destination - // supports encryption because docker2 struct does not have annotations, which are required. - // Reference to issue: https://github.com/containers/image/issues/746 - if options.OciEncryptLayers != nil && !src.SupportsEncryption(ctx) { - return nil, "", "", errors.Errorf("Encryption request but not supported by source transport %s", src.Reference().Transport().Name()) - } - // If the destination is a digested reference, make a note of that, determine what digest value we're // expecting, and check that the source manifest matches it. If the source manifest doesn't, but it's // one item from a manifest list that matches it, accept that as a match. 
@@ -708,21 +715,26 @@ func checkImageDestinationForCurrentRuntime(ctx context.Context, sys *types.Syst if err != nil { return errors.Wrapf(err, "Error parsing image configuration") } - - wantedOS := runtime.GOOS - if sys != nil && sys.OSChoice != "" { - wantedOS = sys.OSChoice - } - if wantedOS != c.OS { - logrus.Infof("Image operating system mismatch: image uses %q, expecting %q", c.OS, wantedOS) - } - - wantedArch := runtime.GOARCH - if sys != nil && sys.ArchitectureChoice != "" { - wantedArch = sys.ArchitectureChoice + wantedPlatforms, err := platform.WantedPlatforms(sys) + if err != nil { + return errors.Wrapf(err, "error getting current platform information %#v", sys) + } + + options := newOrderedSet() + match := false + for _, wantedPlatform := range wantedPlatforms { + // Waiting for https://github.com/opencontainers/image-spec/pull/777 : + // This currently can’t use image.MatchesPlatform because we don’t know what to use + // for image.Variant. + if wantedPlatform.OS == c.OS && wantedPlatform.Architecture == c.Architecture { + match = true + break + } + options.append(fmt.Sprintf("%s+%s", wantedPlatform.OS, wantedPlatform.Architecture)) } - if wantedArch != c.Architecture { - logrus.Infof("Image architecture mismatch: image uses %q, expecting %q", c.Architecture, wantedArch) + if !match { + logrus.Infof("Image operating system mismatch: image uses OS %q+architecture %q, expecting one of %q", + c.OS, c.Architecture, strings.Join(options.list, ", ")) } } return nil @@ -833,21 +845,24 @@ func (ic *imageCopier) copyLayers(ctx context.Context) error { } } - func() { // A scope for defer + if err := func() error { // A scope for defer progressPool, progressCleanup := ic.c.newProgressPool(ctx) defer progressCleanup() for i, srcLayer := range srcInfos { err = copySemaphore.Acquire(ctx, 1) if err != nil { - logrus.Debug("Can't acquire semaphoer", err) + return errors.Wrapf(err, "Can't acquire semaphore") } go copyLayerHelper(i, srcLayer, encLayerBitmap[i], 
progressPool) } // Wait for all layers to be copied copyGroup.Wait() - }() + return nil + }(); err != nil { + return err + } destInfos := make([]types.BlobInfo, numLayers) diffIDs := make([]digest.Digest, numLayers) @@ -1006,7 +1021,7 @@ func (c *copier) copyConfig(ctx context.Context, src types.Image) error { return destInfo, nil }() if err != nil { - return nil + return err } if destInfo.Digest != srcInfo.Digest { return errors.Errorf("Internal error: copying uncompressed config blob %s changed digest to %s", srcInfo.Digest, destInfo.Digest) diff --git a/vendor/github.com/containers/image/v5/copy/manifest.go b/vendor/github.com/containers/image/v5/copy/manifest.go index 5a3cf06a4..0c0164cbf 100644 --- a/vendor/github.com/containers/image/v5/copy/manifest.go +++ b/vendor/github.com/containers/image/v5/copy/manifest.go @@ -15,7 +15,7 @@ import ( // Include v2s1 signed but not v2s1 unsigned, because docker/distribution requires a signature even if the unsigned MIME type is used. var preferredManifestMIMETypes = []string{manifest.DockerV2Schema2MediaType, manifest.DockerV2Schema1SignedMediaType} -// orderedSet is a list of strings (MIME types in our case), with each string appearing at most once. +// orderedSet is a list of strings (MIME types or platform descriptors in our case), with each string appearing at most once. type orderedSet struct { list []string included map[string]struct{} @@ -125,8 +125,10 @@ func isMultiImage(ctx context.Context, img types.UnparsedImage) (bool, error) { // determineListConversion takes the current MIME type of a list of manifests, // the list of MIME types supported for a given destination, and a possible // forced value, and returns the MIME type to which we should convert the list -// of manifests, whether we are converting to it or using it unmodified. 
-func (c *copier) determineListConversion(currentListMIMEType string, destSupportedMIMETypes []string, forcedListMIMEType string) (string, error) { +// of manifests (regardless of whether we are converting to it or using it +// unmodified) and a slice of other list types which might be supported by the +// destination. +func (c *copier) determineListConversion(currentListMIMEType string, destSupportedMIMETypes []string, forcedListMIMEType string) (string, []string, error) { // If there's no list of supported types, then anything we support is expected to be supported. if len(destSupportedMIMETypes) == 0 { destSupportedMIMETypes = manifest.SupportedListMIMETypes @@ -136,6 +138,7 @@ func (c *copier) determineListConversion(currentListMIMEType string, destSupport destSupportedMIMETypes = []string{forcedListMIMEType} } var selectedType string + var otherSupportedTypes []string for i := range destSupportedMIMETypes { // The second priority is the first member of the list of acceptable types that is a list, // but keep going in case current type occurs later in the list. @@ -148,15 +151,21 @@ func (c *copier) determineListConversion(currentListMIMEType string, destSupport selectedType = destSupportedMIMETypes[i] } } + // Pick out the other list types that we support. 
+ for i := range destSupportedMIMETypes { + if selectedType != destSupportedMIMETypes[i] && manifest.MIMETypeIsMultiImage(destSupportedMIMETypes[i]) { + otherSupportedTypes = append(otherSupportedTypes, destSupportedMIMETypes[i]) + } + } logrus.Debugf("Manifest list has MIME type %s, ordered candidate list [%s]", currentListMIMEType, strings.Join(destSupportedMIMETypes, ", ")) if selectedType == "" { - return "", errors.Errorf("destination does not support any supported manifest list types (%v)", manifest.SupportedListMIMETypes) + return "", nil, errors.Errorf("destination does not support any supported manifest list types (%v)", manifest.SupportedListMIMETypes) } if selectedType != currentListMIMEType { - logrus.Debugf("... will convert to %s", selectedType) + logrus.Debugf("... will convert to %s first, and then try %v", selectedType, otherSupportedTypes) } else { - logrus.Debugf("... will use the original manifest list type") + logrus.Debugf("... will use the original manifest list type, and then try %v", otherSupportedTypes) } // Done. - return selectedType, nil + return selectedType, otherSupportedTypes, nil } diff --git a/vendor/github.com/containers/image/v5/docker/archive/transport.go b/vendor/github.com/containers/image/v5/docker/archive/transport.go index 46c01891f..26bc687e0 100644 --- a/vendor/github.com/containers/image/v5/docker/archive/transport.go +++ b/vendor/github.com/containers/image/v5/docker/archive/transport.go @@ -41,10 +41,10 @@ func (t archiveTransport) ValidatePolicyConfigurationScope(scope string) error { // archiveReference is an ImageReference for Docker images. type archiveReference struct { - // only used for destinations + path string + // only used for destinations, // archiveReference.destinationRef is optional and can be nil for destinations as well. destinationRef reference.NamedTagged - path string } // ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an Docker ImageReference. 
@@ -64,11 +64,6 @@ func ParseReference(refString string) (types.ImageReference, error) { return nil, errors.Wrapf(err, "docker-archive parsing reference") } ref = reference.TagNameOnly(ref) - - if _, isDigest := ref.(reference.Canonical); isDigest { - return nil, errors.Errorf("docker-archive doesn't support digest references: %s", refString) - } - refTagged, isTagged := ref.(reference.NamedTagged) if !isTagged { // Really shouldn't be hit... @@ -77,9 +72,20 @@ func ParseReference(refString string) (types.ImageReference, error) { destinationRef = refTagged } + return NewReference(path, destinationRef) +} + +// NewReference rethrns a Docker archive reference for a path and an optional destination reference. +func NewReference(path string, destinationRef reference.NamedTagged) (types.ImageReference, error) { + if strings.Contains(path, ":") { + return nil, errors.Errorf("Invalid docker-archive: reference: colon in path %q is not supported", path) + } + if _, isDigest := destinationRef.(reference.Canonical); isDigest { + return nil, errors.Errorf("docker-archive doesn't support digest references: %s", destinationRef.String()) + } return archiveReference{ - destinationRef: destinationRef, path: path, + destinationRef: destinationRef, }, nil } diff --git a/vendor/github.com/containers/image/v5/docker/docker_client.go b/vendor/github.com/containers/image/v5/docker/docker_client.go index aa8463d18..c316bdeec 100644 --- a/vendor/github.com/containers/image/v5/docker/docker_client.go +++ b/vendor/github.com/containers/image/v5/docker/docker_client.go @@ -1,11 +1,13 @@ package docker import ( + "bytes" "context" "crypto/tls" "encoding/json" "fmt" "io" + "io/ioutil" "net/http" "net/url" "os" @@ -21,6 +23,7 @@ import ( "github.com/containers/image/v5/pkg/sysregistriesv2" "github.com/containers/image/v5/pkg/tlsclientconfig" "github.com/containers/image/v5/types" + "github.com/containers/storage/pkg/homedir" clientLib "github.com/docker/distribution/registry/client" 
"github.com/docker/go-connections/tlsconfig" digest "github.com/opencontainers/go-digest" @@ -51,7 +54,18 @@ const ( backoffMaxDelay = 60 * time.Second ) -var systemPerHostCertDirPaths = [2]string{"/etc/containers/certs.d", "/etc/docker/certs.d"} +type certPath struct { + path string + absolute bool +} + +var ( + homeCertDir = filepath.FromSlash(".config/containers/certs.d") + perHostCertDirs = []certPath{ + {path: "/etc/containers/certs.d", absolute: true}, + {path: "/etc/docker/certs.d", absolute: true}, + } +) // extensionSignature and extensionSignatureList come from github.com/openshift/origin/pkg/dockerregistry/server/signaturedispatcher.go: // signature represents a Docker image signature. @@ -85,8 +99,8 @@ type dockerClient struct { // by detectProperties(). Callers can edit tlsClientConfig.InsecureSkipVerify in the meantime. tlsClientConfig *tls.Config // The following members are not set by newDockerClient and must be set by callers if needed. - username string - password string + auth types.DockerAuthConfig + registryToken string signatureBase signatureStorageBase scope authScope @@ -166,11 +180,12 @@ func dockerCertDir(sys *types.SystemContext, hostPort string) (string, error) { hostCertDir string fullCertDirPath string ) - for _, systemPerHostCertDirPath := range systemPerHostCertDirPaths { - if sys != nil && sys.RootForImplicitAbsolutePaths != "" { - hostCertDir = filepath.Join(sys.RootForImplicitAbsolutePaths, systemPerHostCertDirPath) + + for _, perHostCertDir := range append([]certPath{{path: filepath.Join(homedir.Get(), homeCertDir), absolute: false}}, perHostCertDirs...) 
{ + if sys != nil && sys.RootForImplicitAbsolutePaths != "" && perHostCertDir.absolute { + hostCertDir = filepath.Join(sys.RootForImplicitAbsolutePaths, perHostCertDir.path) } else { - hostCertDir = systemPerHostCertDirPath + hostCertDir = perHostCertDir.path } fullCertDirPath = filepath.Join(hostCertDir, hostPort) @@ -196,10 +211,11 @@ func dockerCertDir(sys *types.SystemContext, hostPort string) (string, error) { // “write†specifies whether the client will be used for "write" access (in particular passed to lookaside.go:toplevelFromSection) func newDockerClientFromRef(sys *types.SystemContext, ref dockerReference, write bool, actions string) (*dockerClient, error) { registry := reference.Domain(ref.ref) - username, password, err := config.GetAuthentication(sys, registry) + auth, err := config.GetCredentials(sys, registry) if err != nil { return nil, errors.Wrapf(err, "error getting username and password") } + sigBase, err := configuredSignatureStorageBase(sys, ref, write) if err != nil { return nil, err @@ -209,8 +225,10 @@ func newDockerClientFromRef(sys *types.SystemContext, ref dockerReference, write if err != nil { return nil, err } - client.username = username - client.password = password + client.auth = auth + if sys != nil { + client.registryToken = sys.DockerBearerRegistryToken + } client.signatureBase = sigBase client.scope.actions = actions client.scope.remoteName = reference.Path(ref.ref) @@ -252,7 +270,7 @@ func newDockerClient(sys *types.SystemContext, registry, reference string) (*doc } if reg != nil { if reg.Blocked { - return nil, fmt.Errorf("registry %s is blocked in %s", reg.Prefix, sysregistriesv2.ConfigPath(sys)) + return nil, fmt.Errorf("registry %s is blocked in %s or %s", reg.Prefix, sysregistriesv2.ConfigPath(sys), sysregistriesv2.ConfigDirPath(sys)) } skipVerify = reg.Insecure } @@ -272,8 +290,10 @@ func CheckAuth(ctx context.Context, sys *types.SystemContext, username, password if err != nil { return errors.Wrapf(err, "error creating new 
docker client") } - client.username = username - client.password = password + client.auth = types.DockerAuthConfig{ + Username: username, + Password: password, + } resp, err := client.makeRequest(ctx, "GET", "/v2/", nil, nil, v2Auth, nil) if err != nil { @@ -315,7 +335,7 @@ func SearchRegistry(ctx context.Context, sys *types.SystemContext, registry, ima v1Res := &V1Results{} // Get credentials from authfile for the underlying hostname - username, password, err := config.GetAuthentication(sys, registry) + auth, err := config.GetCredentials(sys, registry) if err != nil { return nil, errors.Wrapf(err, "error getting username and password") } @@ -333,8 +353,10 @@ func SearchRegistry(ctx context.Context, sys *types.SystemContext, registry, ima if err != nil { return nil, errors.Wrapf(err, "error creating new docker client") } - client.username = username - client.password = password + client.auth = auth + if sys != nil { + client.registryToken = sys.DockerBearerRegistryToken + } // Only try the v1 search endpoint if the search query is not empty. If it is // empty skip to the v2 endpoint. @@ -515,30 +537,43 @@ func (c *dockerClient) setupRequestAuth(req *http.Request, extraScope *authScope schemeNames = append(schemeNames, challenge.Scheme) switch challenge.Scheme { case "basic": - req.SetBasicAuth(c.username, c.password) + req.SetBasicAuth(c.auth.Username, c.auth.Password) return nil case "bearer": - cacheKey := "" - scopes := []authScope{c.scope} - if extraScope != nil { - // Using ':' as a separator here is unambiguous because getBearerToken below uses the same separator when formatting a remote request (and because repository names can't contain colons). 
- cacheKey = fmt.Sprintf("%s:%s", extraScope.remoteName, extraScope.actions) - scopes = append(scopes, *extraScope) - } - var token bearerToken - t, inCache := c.tokenCache.Load(cacheKey) - if inCache { - token = t.(bearerToken) - } - if !inCache || time.Now().After(token.expirationTime) { - t, err := c.getBearerToken(req.Context(), challenge, scopes) - if err != nil { - return err + registryToken := c.registryToken + if registryToken == "" { + cacheKey := "" + scopes := []authScope{c.scope} + if extraScope != nil { + // Using ':' as a separator here is unambiguous because getBearerToken below uses the same separator when formatting a remote request (and because repository names can't contain colons). + cacheKey = fmt.Sprintf("%s:%s", extraScope.remoteName, extraScope.actions) + scopes = append(scopes, *extraScope) + } + var token bearerToken + t, inCache := c.tokenCache.Load(cacheKey) + if inCache { + token = t.(bearerToken) } - token = *t - c.tokenCache.Store(cacheKey, token) + if !inCache || time.Now().After(token.expirationTime) { + var ( + t *bearerToken + err error + ) + if c.auth.IdentityToken != "" { + t, err = c.getBearerTokenOAuth2(req.Context(), challenge, scopes) + } else { + t, err = c.getBearerToken(req.Context(), challenge, scopes) + } + if err != nil { + return err + } + + token = *t + c.tokenCache.Store(cacheKey, token) + } + registryToken = token.Token } - req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", token.Token)) + req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", registryToken)) return nil default: logrus.Debugf("no handler for %s authentication", challenge.Scheme) @@ -548,48 +583,96 @@ func (c *dockerClient) setupRequestAuth(req *http.Request, extraScope *authScope return nil } -func (c *dockerClient) getBearerToken(ctx context.Context, challenge challenge, scopes []authScope) (*bearerToken, error) { +func (c *dockerClient) getBearerTokenOAuth2(ctx context.Context, challenge challenge, + scopes []authScope) (*bearerToken, 
error) { + realm, ok := challenge.Parameters["realm"] + if !ok { + return nil, errors.Errorf("missing realm in bearer auth challenge") + } + + authReq, err := http.NewRequest(http.MethodPost, realm, nil) + if err != nil { + return nil, err + } + + authReq = authReq.WithContext(ctx) + + // Make the form data required against the oauth2 authentication + // More details here: https://docs.docker.com/registry/spec/auth/oauth/ + params := authReq.URL.Query() + if service, ok := challenge.Parameters["service"]; ok && service != "" { + params.Add("service", service) + } + for _, scope := range scopes { + if scope.remoteName != "" && scope.actions != "" { + params.Add("scope", fmt.Sprintf("repository:%s:%s", scope.remoteName, scope.actions)) + } + } + params.Add("grant_type", "refresh_token") + params.Add("refresh_token", c.auth.IdentityToken) + + authReq.Body = ioutil.NopCloser(bytes.NewBufferString(params.Encode())) + authReq.Header.Add("Content-Type", "application/x-www-form-urlencoded") + logrus.Debugf("%s %s", authReq.Method, authReq.URL.String()) + res, err := c.client.Do(authReq) + if err != nil { + return nil, err + } + defer res.Body.Close() + if err := httpResponseToError(res, "Trying to obtain access token"); err != nil { + return nil, err + } + + tokenBlob, err := iolimits.ReadAtMost(res.Body, iolimits.MaxAuthTokenBodySize) + if err != nil { + return nil, err + } + + return newBearerTokenFromJSONBlob(tokenBlob) +} + +func (c *dockerClient) getBearerToken(ctx context.Context, challenge challenge, + scopes []authScope) (*bearerToken, error) { realm, ok := challenge.Parameters["realm"] if !ok { return nil, errors.Errorf("missing realm in bearer auth challenge") } - authReq, err := http.NewRequest("GET", realm, nil) + authReq, err := http.NewRequest(http.MethodGet, realm, nil) if err != nil { return nil, err } + authReq = authReq.WithContext(ctx) - getParams := authReq.URL.Query() - if c.username != "" { - getParams.Add("account", c.username) + params := 
authReq.URL.Query() + if c.auth.Username != "" { + params.Add("account", c.auth.Username) } + if service, ok := challenge.Parameters["service"]; ok && service != "" { - getParams.Add("service", service) + params.Add("service", service) } + for _, scope := range scopes { if scope.remoteName != "" && scope.actions != "" { - getParams.Add("scope", fmt.Sprintf("repository:%s:%s", scope.remoteName, scope.actions)) + params.Add("scope", fmt.Sprintf("repository:%s:%s", scope.remoteName, scope.actions)) } } - authReq.URL.RawQuery = getParams.Encode() - if c.username != "" && c.password != "" { - authReq.SetBasicAuth(c.username, c.password) + + authReq.URL.RawQuery = params.Encode() + + if c.auth.Username != "" && c.auth.Password != "" { + authReq.SetBasicAuth(c.auth.Username, c.auth.Password) } + logrus.Debugf("%s %s", authReq.Method, authReq.URL.String()) res, err := c.client.Do(authReq) if err != nil { return nil, err } defer res.Body.Close() - switch res.StatusCode { - case http.StatusUnauthorized: - err := clientLib.HandleErrorResponse(res) - logrus.Debugf("Server response when trying to obtain an access token: \n%q", err.Error()) - return nil, ErrUnauthorizedForCredentials{Err: err} - case http.StatusOK: - break - default: - return nil, errors.Errorf("unexpected http code: %d (%s), URL: %s", res.StatusCode, http.StatusText(res.StatusCode), authReq.URL) + if err := httpResponseToError(res, "Requesting bear token"); err != nil { + return nil, err } tokenBlob, err := iolimits.ReadAtMost(res.Body, iolimits.MaxAuthTokenBodySize) if err != nil { diff --git a/vendor/github.com/containers/image/v5/docker/docker_image_src.go b/vendor/github.com/containers/image/v5/docker/docker_image_src.go index 967845e72..9c0c20c64 100644 --- a/vendor/github.com/containers/image/v5/docker/docker_image_src.go +++ b/vendor/github.com/containers/image/v5/docker/docker_image_src.go @@ -108,6 +108,7 @@ func newImageSourceAttempt(ctx context.Context, sys *types.SystemContext, pullSo if endpointSys 
!= nil && endpointSys.DockerAuthConfig != nil && reference.Domain(ref.ref) != primaryDomain { copy := *endpointSys copy.DockerAuthConfig = nil + copy.DockerBearerRegistryToken = "" endpointSys = © } diff --git a/vendor/github.com/containers/image/v5/docker/tarfile/src.go b/vendor/github.com/containers/image/v5/docker/tarfile/src.go index c1f1a0f27..4d2368c70 100644 --- a/vendor/github.com/containers/image/v5/docker/tarfile/src.go +++ b/vendor/github.com/containers/image/v5/docker/tarfile/src.go @@ -249,6 +249,9 @@ func (s *Source) ensureCachedDataIsPresentPrivate() error { if err := json.Unmarshal(configBytes, &parsedConfig); err != nil { return errors.Wrapf(err, "Error decoding tar config %s", tarManifest[0].Config) } + if parsedConfig.RootFS == nil { + return errors.Errorf("Invalid image config (rootFS is not set): %s", tarManifest[0].Config) + } knownLayers, err := s.prepareLayerData(&tarManifest[0], &parsedConfig) if err != nil { diff --git a/vendor/github.com/containers/image/v5/image/docker_schema1.go b/vendor/github.com/containers/image/v5/image/docker_schema1.go index 48ddb174e..5f24970c3 100644 --- a/vendor/github.com/containers/image/v5/image/docker_schema1.go +++ b/vendor/github.com/containers/image/v5/image/docker_schema1.go @@ -56,7 +56,7 @@ func (m *manifestSchema1) ConfigBlob(context.Context) ([]byte, error) { // layers in the resulting configuration isn't guaranteed to be returned to due how // old image manifests work (docker v2s1 especially). func (m *manifestSchema1) OCIConfig(ctx context.Context) (*imgspecv1.Image, error) { - v2s2, err := m.convertToManifestSchema2(nil, nil) + v2s2, err := m.convertToManifestSchema2(ctx, &types.ManifestUpdateOptions{}) if err != nil { return nil, err } @@ -107,6 +107,24 @@ func (m *manifestSchema1) UpdatedImageNeedsLayerDiffIDs(options types.ManifestUp // This does not change the state of the original Image object. 
func (m *manifestSchema1) UpdatedImage(ctx context.Context, options types.ManifestUpdateOptions) (types.Image, error) { copy := manifestSchema1{m: manifest.Schema1Clone(m.m)} + + // We have 2 MIME types for schema 1, which are basically equivalent (even the un-"Signed" MIME type will be rejected if there isn’t a signature; so, + // handle conversions between them by doing nothing. + if options.ManifestMIMEType != manifest.DockerV2Schema1MediaType && options.ManifestMIMEType != manifest.DockerV2Schema1SignedMediaType { + converted, err := convertManifestIfRequiredWithUpdate(ctx, options, map[string]manifestConvertFn{ + imgspecv1.MediaTypeImageManifest: copy.convertToManifestOCI1, + manifest.DockerV2Schema2MediaType: copy.convertToManifestSchema2Generic, + }) + if err != nil { + return nil, err + } + + if converted != nil { + return converted, nil + } + } + + // No conversion required, update manifest if options.LayerInfos != nil { if err := copy.m.UpdateLayerInfos(options.LayerInfos); err != nil { return nil, err @@ -121,36 +139,30 @@ func (m *manifestSchema1) UpdatedImage(ctx context.Context, options types.Manife } } - switch options.ManifestMIMEType { - case "": // No conversion, OK - case manifest.DockerV2Schema1MediaType, manifest.DockerV2Schema1SignedMediaType: - // We have 2 MIME types for schema 1, which are basically equivalent (even the un-"Signed" MIME type will be rejected if there isn’t a signature; so, - // handle conversions between them by doing nothing. 
- case manifest.DockerV2Schema2MediaType: - m2, err := copy.convertToManifestSchema2(options.InformationOnly.LayerInfos, options.InformationOnly.LayerDiffIDs) - if err != nil { - return nil, err - } - return memoryImageFromManifest(m2), nil - case imgspecv1.MediaTypeImageManifest: - // We can't directly convert to OCI, but we can transitively convert via a Docker V2.2 Distribution manifest - m2, err := copy.convertToManifestSchema2(options.InformationOnly.LayerInfos, options.InformationOnly.LayerDiffIDs) - if err != nil { - return nil, err - } - return m2.UpdatedImage(ctx, types.ManifestUpdateOptions{ - ManifestMIMEType: imgspecv1.MediaTypeImageManifest, - InformationOnly: options.InformationOnly, - }) - default: - return nil, errors.Errorf("Conversion of image manifest from %s to %s is not implemented", manifest.DockerV2Schema1SignedMediaType, options.ManifestMIMEType) - } - return memoryImageFromManifest(©), nil } +// convertToManifestSchema2Generic returns a genericManifest implementation converted to manifest.DockerV2Schema2MediaType. +// It may use options.InformationOnly and also adjust *options to be appropriate for editing the returned +// value. +// This does not change the state of the original manifestSchema1 object. +// +// We need this function just because a function returning an implementation of the genericManifest +// interface is not automatically assignable to a function type returning the genericManifest interface +func (m *manifestSchema1) convertToManifestSchema2Generic(ctx context.Context, options *types.ManifestUpdateOptions) (genericManifest, error) { + return m.convertToManifestSchema2(ctx, options) +} + +// convertToManifestSchema2 returns a genericManifest implementation converted to manifest.DockerV2Schema2MediaType. +// It may use options.InformationOnly and also adjust *options to be appropriate for editing the returned +// value. +// This does not change the state of the original manifestSchema1 object. 
+// // Based on github.com/docker/docker/distribution/pull_v2.go -func (m *manifestSchema1) convertToManifestSchema2(uploadedLayerInfos []types.BlobInfo, layerDiffIDs []digest.Digest) (genericManifest, error) { +func (m *manifestSchema1) convertToManifestSchema2(_ context.Context, options *types.ManifestUpdateOptions) (*manifestSchema2, error) { + uploadedLayerInfos := options.InformationOnly.LayerInfos + layerDiffIDs := options.InformationOnly.LayerDiffIDs + if len(m.m.ExtractedV1Compatibility) == 0 { // What would this even mean?! Anyhow, the rest of the code depends on FSLayers[0] and ExtractedV1Compatibility[0] existing. return nil, errors.Errorf("Cannot convert an image with 0 history entries to %s", manifest.DockerV2Schema2MediaType) @@ -165,6 +177,15 @@ func (m *manifestSchema1) convertToManifestSchema2(uploadedLayerInfos []types.Bl return nil, errors.Errorf("Internal error: collected %d DiffID values, but schema1 manifest has %d fsLayers", len(layerDiffIDs), len(m.m.FSLayers)) } + var convertedLayerUpdates []types.BlobInfo // Only used if options.LayerInfos != nil + if options.LayerInfos != nil { + if len(options.LayerInfos) != len(m.m.FSLayers) { + return nil, errors.Errorf("Error converting image: layer edits for %d layers vs %d existing layers", + len(options.LayerInfos), len(m.m.FSLayers)) + } + convertedLayerUpdates = []types.BlobInfo{} + } + // Build a list of the diffIDs for the non-empty layers. 
diffIDs := []digest.Digest{} var layers []manifest.Schema2Descriptor @@ -185,6 +206,9 @@ func (m *manifestSchema1) convertToManifestSchema2(uploadedLayerInfos []types.Bl Size: size, Digest: m.m.FSLayers[v1Index].BlobSum, }) + if options.LayerInfos != nil { + convertedLayerUpdates = append(convertedLayerUpdates, options.LayerInfos[v2Index]) + } diffIDs = append(diffIDs, d) } } @@ -198,9 +222,26 @@ func (m *manifestSchema1) convertToManifestSchema2(uploadedLayerInfos []types.Bl Digest: digest.FromBytes(configJSON), } + if options.LayerInfos != nil { + options.LayerInfos = convertedLayerUpdates + } return manifestSchema2FromComponents(configDescriptor, nil, configJSON, layers), nil } +// convertToManifestOCI1 returns a genericManifest implementation converted to imgspecv1.MediaTypeImageManifest. +// It may use options.InformationOnly and also adjust *options to be appropriate for editing the returned +// value. +// This does not change the state of the original manifestSchema1 object. +func (m *manifestSchema1) convertToManifestOCI1(ctx context.Context, options *types.ManifestUpdateOptions) (genericManifest, error) { + // We can't directly convert to OCI, but we can transitively convert via a Docker V2.2 Distribution manifest + m2, err := m.convertToManifestSchema2(ctx, options) + if err != nil { + return nil, err + } + + return m2.convertToManifestOCI1(ctx, options) +} + // SupportsEncryption returns if encryption is supported for the manifest type func (m *manifestSchema1) SupportsEncryption(context.Context) bool { return false diff --git a/vendor/github.com/containers/image/v5/image/docker_schema2.go b/vendor/github.com/containers/image/v5/image/docker_schema2.go index 1e2114d7e..e4e01d5d9 100644 --- a/vendor/github.com/containers/image/v5/image/docker_schema2.go +++ b/vendor/github.com/containers/image/v5/image/docker_schema2.go @@ -50,7 +50,7 @@ func manifestSchema2FromManifest(src types.ImageSource, manifestBlob []byte) (ge } // manifestSchema2FromComponents 
builds a new manifestSchema2 from the supplied data: -func manifestSchema2FromComponents(config manifest.Schema2Descriptor, src types.ImageSource, configBlob []byte, layers []manifest.Schema2Descriptor) genericManifest { +func manifestSchema2FromComponents(config manifest.Schema2Descriptor, src types.ImageSource, configBlob []byte, layers []manifest.Schema2Descriptor) *manifestSchema2 { return &manifestSchema2{ src: src, configBlob: configBlob, @@ -160,6 +160,21 @@ func (m *manifestSchema2) UpdatedImage(ctx context.Context, options types.Manife configBlob: m.configBlob, m: manifest.Schema2Clone(m.m), } + + converted, err := convertManifestIfRequiredWithUpdate(ctx, options, map[string]manifestConvertFn{ + manifest.DockerV2Schema1MediaType: copy.convertToManifestSchema1, + manifest.DockerV2Schema1SignedMediaType: copy.convertToManifestSchema1, + imgspecv1.MediaTypeImageManifest: copy.convertToManifestOCI1, + }) + if err != nil { + return nil, err + } + + if converted != nil { + return converted, nil + } + + // No conversion required, update manifest if options.LayerInfos != nil { if err := copy.m.UpdateLayerInfos(options.LayerInfos); err != nil { return nil, err @@ -167,16 +182,6 @@ func (m *manifestSchema2) UpdatedImage(ctx context.Context, options types.Manife } // Ignore options.EmbeddedDockerReference: it may be set when converting from schema1 to schema2, but we really don't care. 
- switch options.ManifestMIMEType { - case "": // No conversion, OK - case manifest.DockerV2Schema1SignedMediaType, manifest.DockerV2Schema1MediaType: - return copy.convertToManifestSchema1(ctx, options.InformationOnly.Destination) - case imgspecv1.MediaTypeImageManifest: - return copy.convertToManifestOCI1(ctx) - default: - return nil, errors.Errorf("Conversion of image manifest from %s to %s is not implemented", manifest.DockerV2Schema2MediaType, options.ManifestMIMEType) - } - return memoryImageFromManifest(©), nil } @@ -189,7 +194,11 @@ func oci1DescriptorFromSchema2Descriptor(d manifest.Schema2Descriptor) imgspecv1 } } -func (m *manifestSchema2) convertToManifestOCI1(ctx context.Context) (types.Image, error) { +// convertToManifestOCI1 returns a genericManifest implementation converted to imgspecv1.MediaTypeImageManifest. +// It may use options.InformationOnly and also adjust *options to be appropriate for editing the returned +// value. +// This does not change the state of the original manifestSchema2 object. +func (m *manifestSchema2) convertToManifestOCI1(ctx context.Context, _ *types.ManifestUpdateOptions) (genericManifest, error) { configOCI, err := m.OCIConfig(ctx) if err != nil { return nil, err @@ -222,12 +231,27 @@ func (m *manifestSchema2) convertToManifestOCI1(ctx context.Context) (types.Imag } } - m1 := manifestOCI1FromComponents(config, m.src, configOCIBytes, layers) - return memoryImageFromManifest(m1), nil + return manifestOCI1FromComponents(config, m.src, configOCIBytes, layers), nil } +// convertToManifestSchema1 returns a genericManifest implementation converted to manifest.DockerV2Schema1{Signed,}MediaType. +// It may use options.InformationOnly and also adjust *options to be appropriate for editing the returned +// value. +// This does not change the state of the original manifestSchema2 object. 
+// // Based on docker/distribution/manifest/schema1/config_builder.go -func (m *manifestSchema2) convertToManifestSchema1(ctx context.Context, dest types.ImageDestination) (types.Image, error) { +func (m *manifestSchema2) convertToManifestSchema1(ctx context.Context, options *types.ManifestUpdateOptions) (genericManifest, error) { + dest := options.InformationOnly.Destination + + var convertedLayerUpdates []types.BlobInfo // Only used if options.LayerInfos != nil + if options.LayerInfos != nil { + if len(options.LayerInfos) != len(m.m.LayersDescriptors) { + return nil, fmt.Errorf("Error converting image: layer edits for %d layers vs %d existing layers", + len(options.LayerInfos), len(m.m.LayersDescriptors)) + } + convertedLayerUpdates = []types.BlobInfo{} + } + configBytes, err := m.ConfigBlob(ctx) if err != nil { return nil, err @@ -254,24 +278,32 @@ func (m *manifestSchema2) convertToManifestSchema1(ctx context.Context, dest typ var blobDigest digest.Digest if historyEntry.EmptyLayer { + emptyLayerBlobInfo := types.BlobInfo{Digest: GzippedEmptyLayerDigest, Size: int64(len(GzippedEmptyLayer))} + if !haveGzippedEmptyLayer { logrus.Debugf("Uploading empty layer during conversion to schema 1") // Ideally we should update the relevant BlobInfoCache about this layer, but that would require passing it down here, // and anyway this blob is so small that it’s easier to just copy it than to worry about figuring out another location where to get it. 
- info, err := dest.PutBlob(ctx, bytes.NewReader(GzippedEmptyLayer), types.BlobInfo{Digest: GzippedEmptyLayerDigest, Size: int64(len(GzippedEmptyLayer))}, none.NoCache, false) + info, err := dest.PutBlob(ctx, bytes.NewReader(GzippedEmptyLayer), emptyLayerBlobInfo, none.NoCache, false) if err != nil { return nil, errors.Wrap(err, "Error uploading empty layer") } - if info.Digest != GzippedEmptyLayerDigest { - return nil, errors.Errorf("Internal error: Uploaded empty layer has digest %#v instead of %s", info.Digest, GzippedEmptyLayerDigest) + if info.Digest != emptyLayerBlobInfo.Digest { + return nil, errors.Errorf("Internal error: Uploaded empty layer has digest %#v instead of %s", info.Digest, emptyLayerBlobInfo.Digest) } haveGzippedEmptyLayer = true } - blobDigest = GzippedEmptyLayerDigest + if options.LayerInfos != nil { + convertedLayerUpdates = append(convertedLayerUpdates, emptyLayerBlobInfo) + } + blobDigest = emptyLayerBlobInfo.Digest } else { if nonemptyLayerIndex >= len(m.m.LayersDescriptors) { return nil, errors.Errorf("Invalid image configuration, needs more than the %d distributed layers", len(m.m.LayersDescriptors)) } + if options.LayerInfos != nil { + convertedLayerUpdates = append(convertedLayerUpdates, options.LayerInfos[nonemptyLayerIndex]) + } blobDigest = m.m.LayersDescriptors[nonemptyLayerIndex].Digest nonemptyLayerIndex++ } @@ -313,11 +345,14 @@ func (m *manifestSchema2) convertToManifestSchema1(ctx context.Context, dest typ } history[0].V1Compatibility = string(v1Config) + if options.LayerInfos != nil { + options.LayerInfos = convertedLayerUpdates + } m1, err := manifestSchema1FromComponents(dest.Reference().DockerReference(), fsLayers, history, imageConfig.Architecture) if err != nil { return nil, err // This should never happen, we should have created all the components correctly. 
} - return memoryImageFromManifest(m1), nil + return m1, nil } func v1IDFromBlobDigestAndComponents(blobDigest digest.Digest, others ...string) (string, error) { diff --git a/vendor/github.com/containers/image/v5/image/manifest.go b/vendor/github.com/containers/image/v5/image/manifest.go index c574fa9fc..36d70b5c2 100644 --- a/vendor/github.com/containers/image/v5/image/manifest.go +++ b/vendor/github.com/containers/image/v5/image/manifest.go @@ -8,6 +8,7 @@ import ( "github.com/containers/image/v5/manifest" "github.com/containers/image/v5/types" imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" ) // genericManifest is an interface for parsing, modifying image manifests and related data. @@ -45,6 +46,10 @@ type genericManifest interface { // This does not change the state of the original Image object. UpdatedImage(ctx context.Context, options types.ManifestUpdateOptions) (types.Image, error) // SupportsEncryption returns if encryption is supported for the manifest type + // + // Deprecated: Initially used to determine if a manifest can be copied from a source manifest type since + // the process of updating a manifest between different manifest types was to update then convert. + // This resulted in some fields in the update being lost. This has been fixed by: https://github.com/containers/image/pull/836 SupportsEncryption(ctx context.Context) bool } @@ -75,3 +80,34 @@ func manifestLayerInfosToBlobInfos(layers []manifest.LayerInfo) []types.BlobInfo } return blobs } + +// manifestConvertFn (a method of genericManifest object) returns a genericManifest implementation +// converted to a specific manifest MIME type. +// It may use options.InformationOnly and also adjust *options to be appropriate for editing the returned +// value. +// This does not change the state of the original genericManifest object. 
+type manifestConvertFn func(ctx context.Context, options *types.ManifestUpdateOptions) (genericManifest, error) + +// convertManifestIfRequiredWithUpdate will run conversion functions of a manifest if +// required and re-apply the options to the converted type. +// It returns (nil, nil) if no conversion was requested. +func convertManifestIfRequiredWithUpdate(ctx context.Context, options types.ManifestUpdateOptions, converters map[string]manifestConvertFn) (types.Image, error) { + if options.ManifestMIMEType == "" { + return nil, nil + } + + converter, ok := converters[options.ManifestMIMEType] + if !ok { + return nil, errors.Errorf("Unsupported conversion type: %v", options.ManifestMIMEType) + } + + optionsCopy := options + convertedManifest, err := converter(ctx, &optionsCopy) + if err != nil { + return nil, err + } + convertedImage := memoryImageFromManifest(convertedManifest) + + optionsCopy.ManifestMIMEType = "" + return convertedImage.UpdatedImage(ctx, optionsCopy) +} diff --git a/vendor/github.com/containers/image/v5/image/oci.go b/vendor/github.com/containers/image/v5/image/oci.go index b5ddb9aaa..5cb04f979 100644 --- a/vendor/github.com/containers/image/v5/image/oci.go +++ b/vendor/github.com/containers/image/v5/image/oci.go @@ -140,6 +140,21 @@ func (m *manifestOCI1) UpdatedImage(ctx context.Context, options types.ManifestU configBlob: m.configBlob, m: manifest.OCI1Clone(m.m), } + + converted, err := convertManifestIfRequiredWithUpdate(ctx, options, map[string]manifestConvertFn{ + manifest.DockerV2Schema2MediaType: copy.convertToManifestSchema2Generic, + manifest.DockerV2Schema1MediaType: copy.convertToManifestSchema1, + manifest.DockerV2Schema1SignedMediaType: copy.convertToManifestSchema1, + }) + if err != nil { + return nil, err + } + + if converted != nil { + return converted, nil + } + + // No conversion required, update manifest if options.LayerInfos != nil { if err := copy.m.UpdateLayerInfos(options.LayerInfos); err != nil { return nil, err @@ 
-147,24 +162,6 @@ func (m *manifestOCI1) UpdatedImage(ctx context.Context, options types.ManifestU } // Ignore options.EmbeddedDockerReference: it may be set when converting from schema1, but we really don't care. - switch options.ManifestMIMEType { - case "": // No conversion, OK - case manifest.DockerV2Schema1MediaType, manifest.DockerV2Schema1SignedMediaType: - // We can't directly convert to V1, but we can transitively convert via a V2 image - m2, err := copy.convertToManifestSchema2() - if err != nil { - return nil, err - } - return m2.UpdatedImage(ctx, types.ManifestUpdateOptions{ - ManifestMIMEType: options.ManifestMIMEType, - InformationOnly: options.InformationOnly, - }) - case manifest.DockerV2Schema2MediaType: - return copy.convertToManifestSchema2() - default: - return nil, errors.Errorf("Conversion of image manifest from %s to %s is not implemented", imgspecv1.MediaTypeImageManifest, options.ManifestMIMEType) - } - return memoryImageFromManifest(©), nil } @@ -177,7 +174,22 @@ func schema2DescriptorFromOCI1Descriptor(d imgspecv1.Descriptor) manifest.Schema } } -func (m *manifestOCI1) convertToManifestSchema2() (types.Image, error) { +// convertToManifestSchema2Generic returns a genericManifest implementation converted to manifest.DockerV2Schema2MediaType. +// It may use options.InformationOnly and also adjust *options to be appropriate for editing the returned +// value. +// This does not change the state of the original manifestSchema1 object. 
+// +// We need this function just because a function returning an implementation of the genericManifest +// interface is not automatically assignable to a function type returning the genericManifest interface +func (m *manifestOCI1) convertToManifestSchema2Generic(ctx context.Context, options *types.ManifestUpdateOptions) (genericManifest, error) { + return m.convertToManifestSchema2(ctx, options) +} + +// convertToManifestSchema2 returns a genericManifest implementation converted to manifest.DockerV2Schema2MediaType. +// It may use options.InformationOnly and also adjust *options to be appropriate for editing the returned +// value. +// This does not change the state of the original manifestOCI1 object. +func (m *manifestOCI1) convertToManifestSchema2(_ context.Context, _ *types.ManifestUpdateOptions) (*manifestSchema2, error) { // Create a copy of the descriptor. config := schema2DescriptorFromOCI1Descriptor(m.m.Config) @@ -209,8 +221,21 @@ func (m *manifestOCI1) convertToManifestSchema2() (types.Image, error) { // Rather than copying the ConfigBlob now, we just pass m.src to the // translated manifest, since the only difference is the mediatype of // descriptors there is no change to any blob stored in m.src. - m1 := manifestSchema2FromComponents(config, m.src, nil, layers) - return memoryImageFromManifest(m1), nil + return manifestSchema2FromComponents(config, m.src, nil, layers), nil +} + +// convertToManifestSchema1 returns a genericManifest implementation converted to manifest.DockerV2Schema1{Signed,}MediaType. +// It may use options.InformationOnly and also adjust *options to be appropriate for editing the returned +// value. +// This does not change the state of the original manifestOCI1 object. 
+func (m *manifestOCI1) convertToManifestSchema1(ctx context.Context, options *types.ManifestUpdateOptions) (genericManifest, error) { + // We can't directly convert to V1, but we can transitively convert via a V2 image + m2, err := m.convertToManifestSchema2(ctx, options) + if err != nil { + return nil, err + } + + return m2.convertToManifestSchema1(ctx, options) } // SupportsEncryption returns if encryption is supported for the manifest type diff --git a/vendor/github.com/containers/image/v5/internal/pkg/platform/platform_matcher.go b/vendor/github.com/containers/image/v5/internal/pkg/platform/platform_matcher.go new file mode 100644 index 000000000..1f6c4fa71 --- /dev/null +++ b/vendor/github.com/containers/image/v5/internal/pkg/platform/platform_matcher.go @@ -0,0 +1,203 @@ +package platform + +// Largely based on +// https://github.com/moby/moby/blob/bc846d2e8fe5538220e0c31e9d0e8446f6fbc022/distribution/cpuinfo_unix.go +// Copyright 2012-2017 Docker, Inc. +// +// https://github.com/containerd/containerd/blob/726dcaea50883e51b2ec6db13caff0e7936b711d/platforms/cpuinfo.go +// Copyright The containerd Authors. +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "bufio" + "fmt" + "os" + "runtime" + "strings" + + "github.com/containers/image/v5/types" + imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" +) + +// For Linux, the kernel has already detected the ABI, ISA and Features. 
+// So we don't need to access the ARM registers to detect platform information +// by ourselves. We can just parse these information from /proc/cpuinfo +func getCPUInfo(pattern string) (info string, err error) { + if runtime.GOOS != "linux" { + return "", fmt.Errorf("getCPUInfo for OS %s not implemented", runtime.GOOS) + } + + cpuinfo, err := os.Open("/proc/cpuinfo") + if err != nil { + return "", err + } + defer cpuinfo.Close() + + // Start to Parse the Cpuinfo line by line. For SMP SoC, we parse + // the first core is enough. + scanner := bufio.NewScanner(cpuinfo) + for scanner.Scan() { + newline := scanner.Text() + list := strings.Split(newline, ":") + + if len(list) > 1 && strings.EqualFold(strings.TrimSpace(list[0]), pattern) { + return strings.TrimSpace(list[1]), nil + } + } + + // Check whether the scanner encountered errors + err = scanner.Err() + if err != nil { + return "", err + } + + return "", fmt.Errorf("getCPUInfo for pattern: %s not found", pattern) +} + +func getCPUVariantWindows() string { + // Windows only supports v7 for ARM32 and v8 for ARM64 and so we can use + // runtime.GOARCH to determine the variants + var variant string + switch runtime.GOARCH { + case "arm64": + variant = "v8" + case "arm": + variant = "v7" + default: + variant = "" + } + + return variant +} + +func getCPUVariantArm() string { + variant, err := getCPUInfo("Cpu architecture") + if err != nil { + return "" + } + // TODO handle RPi Zero mismatch (https://github.com/moby/moby/pull/36121#issuecomment-398328286) + + switch strings.ToLower(variant) { + case "8", "aarch64": + variant = "v8" + case "7", "7m", "?(12)", "?(13)", "?(14)", "?(15)", "?(16)", "?(17)": + variant = "v7" + case "6", "6tej": + variant = "v6" + case "5", "5t", "5te", "5tej": + variant = "v5" + case "4", "4t": + variant = "v4" + case "3": + variant = "v3" + default: + variant = "" + } + + return variant +} + +func getCPUVariant(os string, arch string) string { + if os == "windows" { + return 
getCPUVariantWindows() + } + if arch == "arm" || arch == "arm64" { + return getCPUVariantArm() + } + return "" +} + +var compatibility = map[string][]string{ + "arm": {"v7", "v6", "v5"}, + "arm64": {"v8"}, +} + +// Returns all compatible platforms with the platform specifics possibly overriden by user, +// the most compatible platform is first. +// If some option (arch, os, variant) is not present, a value from current platform is detected. +func WantedPlatforms(ctx *types.SystemContext) ([]imgspecv1.Platform, error) { + wantedArch := runtime.GOARCH + wantedVariant := "" + if ctx != nil && ctx.ArchitectureChoice != "" { + wantedArch = ctx.ArchitectureChoice + } else { + // Only auto-detect the variant if we are using the default architecture. + // If the user has specified the ArchitectureChoice, don't autodetect, even if + // ctx.ArchitectureChoice == runtime.GOARCH, because we have no idea whether the runtime.GOARCH + // value is relevant to the use case, and if we do autodetect a variant, + // ctx.VariantChoice can't be used to override it back to "". 
+ wantedVariant = getCPUVariant(runtime.GOOS, runtime.GOARCH) + } + if ctx != nil && ctx.VariantChoice != "" { + wantedVariant = ctx.VariantChoice + } + + wantedOS := runtime.GOOS + if ctx != nil && ctx.OSChoice != "" { + wantedOS = ctx.OSChoice + } + + var wantedPlatforms []imgspecv1.Platform + if wantedVariant != "" && compatibility[wantedArch] != nil { + wantedPlatforms = make([]imgspecv1.Platform, 0, len(compatibility[wantedArch])) + wantedIndex := -1 + for i, v := range compatibility[wantedArch] { + if wantedVariant == v { + wantedIndex = i + break + } + } + // user wants a variant which we know nothing about - not even compatibility + if wantedIndex == -1 { + wantedPlatforms = []imgspecv1.Platform{ + { + OS: wantedOS, + Architecture: wantedArch, + Variant: wantedVariant, + }, + } + } else { + for i := wantedIndex; i < len(compatibility[wantedArch]); i++ { + v := compatibility[wantedArch][i] + wantedPlatforms = append(wantedPlatforms, imgspecv1.Platform{ + OS: wantedOS, + Architecture: wantedArch, + Variant: v, + }) + } + } + } else { + wantedPlatforms = []imgspecv1.Platform{ + { + OS: wantedOS, + Architecture: wantedArch, + Variant: wantedVariant, + }, + } + } + + return wantedPlatforms, nil +} + +func MatchesPlatform(image imgspecv1.Platform, wanted imgspecv1.Platform) bool { + if image.Architecture != wanted.Architecture { + return false + } + if image.OS != wanted.OS { + return false + } + + if wanted.Variant == "" || image.Variant == wanted.Variant { + return true + } + + return false +} diff --git a/vendor/github.com/containers/image/v5/manifest/docker_schema2_list.go b/vendor/github.com/containers/image/v5/manifest/docker_schema2_list.go index 453976c48..5f96a981a 100644 --- a/vendor/github.com/containers/image/v5/manifest/docker_schema2_list.go +++ b/vendor/github.com/containers/image/v5/manifest/docker_schema2_list.go @@ -3,8 +3,8 @@ package manifest import ( "encoding/json" "fmt" - "runtime" + platform 
"github.com/containers/image/v5/internal/pkg/platform" "github.com/containers/image/v5/types" "github.com/opencontainers/go-digest" imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" @@ -81,9 +81,6 @@ func (list *Schema2List) UpdateInstances(updates []ListUpdate) error { if updates[i].MediaType == "" { return errors.Errorf("update %d of %d passed to Schema2List.UpdateInstances had no media type (was %q)", i+1, len(updates), list.Manifests[i].MediaType) } - if err := SupportedSchema2MediaType(updates[i].MediaType); err != nil && SupportedOCI1MediaType(updates[i].MediaType) != nil { - return errors.Wrapf(err, "update %d of %d passed to Schema2List.UpdateInstances had an unsupported media type (was %q): %q", i+1, len(updates), list.Manifests[i].MediaType, updates[i].MediaType) - } list.Manifests[i].MediaType = updates[i].MediaType } return nil @@ -92,21 +89,25 @@ func (list *Schema2List) UpdateInstances(updates []ListUpdate) error { // ChooseInstance parses blob as a schema2 manifest list, and returns the digest // of the image which is appropriate for the current environment. 
func (list *Schema2List) ChooseInstance(ctx *types.SystemContext) (digest.Digest, error) { - wantedArch := runtime.GOARCH - if ctx != nil && ctx.ArchitectureChoice != "" { - wantedArch = ctx.ArchitectureChoice - } - wantedOS := runtime.GOOS - if ctx != nil && ctx.OSChoice != "" { - wantedOS = ctx.OSChoice + wantedPlatforms, err := platform.WantedPlatforms(ctx) + if err != nil { + return "", errors.Wrapf(err, "error getting platform information %#v", ctx) } - - for _, d := range list.Manifests { - if d.Platform.Architecture == wantedArch && d.Platform.OS == wantedOS { - return d.Digest, nil + for _, wantedPlatform := range wantedPlatforms { + for _, d := range list.Manifests { + imagePlatform := imgspecv1.Platform{ + Architecture: d.Platform.Architecture, + OS: d.Platform.OS, + OSVersion: d.Platform.OSVersion, + OSFeatures: dupStringSlice(d.Platform.OSFeatures), + Variant: d.Platform.Variant, + } + if platform.MatchesPlatform(imagePlatform, wantedPlatform) { + return d.Digest, nil + } } } - return "", fmt.Errorf("no image found in manifest list for architecture %s, OS %s", wantedArch, wantedOS) + return "", fmt.Errorf("no image found in manifest list for architecture %s, variant %s, OS %s", wantedPlatforms[0].Architecture, wantedPlatforms[0].Variant, wantedPlatforms[0].OS) } // Serialize returns the list in a blob format. diff --git a/vendor/github.com/containers/image/v5/manifest/oci.go b/vendor/github.com/containers/image/v5/manifest/oci.go index 2d27d9433..aafe6693b 100644 --- a/vendor/github.com/containers/image/v5/manifest/oci.go +++ b/vendor/github.com/containers/image/v5/manifest/oci.go @@ -32,7 +32,14 @@ type OCI1 struct { imgspecv1.Manifest } -// SupportedOCI1MediaType checks if the specified string is a supported OCI1 media type. +// SupportedOCI1MediaType checks if the specified string is a supported OCI1 +// media type. 
+// +// Deprecated: blindly rejecting unknown MIME types when the consumer does not +// need to process the input just reduces interoperability (and violates the +// standard) with no benefit, and that this function does not check that the +// media type is appropriate for any specific purpose, so it’s not all that +// useful for validation anyway. func SupportedOCI1MediaType(m string) error { switch m { case imgspecv1.MediaTypeDescriptor, imgspecv1.MediaTypeImageConfig, imgspecv1.MediaTypeImageLayer, imgspecv1.MediaTypeImageLayerGzip, imgspecv1.MediaTypeImageLayerNonDistributable, imgspecv1.MediaTypeImageLayerNonDistributableGzip, imgspecv1.MediaTypeImageLayerNonDistributableZstd, imgspecv1.MediaTypeImageLayerZstd, imgspecv1.MediaTypeImageManifest, imgspecv1.MediaTypeLayoutHeader, ociencspec.MediaTypeLayerEnc, ociencspec.MediaTypeLayerGzipEnc: @@ -48,15 +55,6 @@ func OCI1FromManifest(manifest []byte) (*OCI1, error) { if err := json.Unmarshal(manifest, &oci1); err != nil { return nil, err } - // Check manifest's and layers' media types. - if err := SupportedOCI1MediaType(oci1.Config.MediaType); err != nil { - return nil, err - } - for _, layer := range oci1.Layers { - if err := SupportedOCI1MediaType(layer.MediaType); err != nil { - return nil, err - } - } return &oci1, nil } @@ -128,11 +126,6 @@ func (m *OCI1) UpdateLayerInfos(layerInfos []types.BlobInfo) error { m.Layers = make([]imgspecv1.Descriptor, len(layerInfos)) for i, info := range layerInfos { mimeType := original[i].MediaType - // First make sure we support the media type of the original layer. 
- if err := SupportedOCI1MediaType(original[i].MediaType); err != nil { - return fmt.Errorf("Error preparing updated manifest: unknown media type of original layer: %q", original[i].MediaType) - } - if info.CryptoOperation == types.Decrypt { decMimeType, err := getDecryptedMediaType(mimeType) if err != nil { diff --git a/vendor/github.com/containers/image/v5/manifest/oci_index.go b/vendor/github.com/containers/image/v5/manifest/oci_index.go index 816503ce5..18cc8135c 100644 --- a/vendor/github.com/containers/image/v5/manifest/oci_index.go +++ b/vendor/github.com/containers/image/v5/manifest/oci_index.go @@ -5,6 +5,7 @@ import ( "fmt" "runtime" + platform "github.com/containers/image/v5/internal/pkg/platform" "github.com/containers/image/v5/types" "github.com/opencontainers/go-digest" imgspec "github.com/opencontainers/image-spec/specs-go" @@ -64,9 +65,6 @@ func (index *OCI1Index) UpdateInstances(updates []ListUpdate) error { if updates[i].MediaType == "" { return errors.Errorf("update %d of %d passed to OCI1Index.UpdateInstances had no media type (was %q)", i+1, len(updates), index.Manifests[i].MediaType) } - if err := SupportedOCI1MediaType(updates[i].MediaType); err != nil && SupportedSchema2MediaType(updates[i].MediaType) != nil && updates[i].MediaType != imgspecv1.MediaTypeImageIndex { - return errors.Wrapf(err, "update %d of %d passed to OCI1Index.UpdateInstances had an unsupported media type (was %q): %q", i+1, len(updates), index.Manifests[i].MediaType, updates[i].MediaType) - } index.Manifests[i].MediaType = updates[i].MediaType } return nil @@ -75,26 +73,31 @@ func (index *OCI1Index) UpdateInstances(updates []ListUpdate) error { // ChooseInstance parses blob as an oci v1 manifest index, and returns the digest // of the image which is appropriate for the current environment. 
func (index *OCI1Index) ChooseInstance(ctx *types.SystemContext) (digest.Digest, error) { - wantedArch := runtime.GOARCH - if ctx != nil && ctx.ArchitectureChoice != "" { - wantedArch = ctx.ArchitectureChoice - } - wantedOS := runtime.GOOS - if ctx != nil && ctx.OSChoice != "" { - wantedOS = ctx.OSChoice + wantedPlatforms, err := platform.WantedPlatforms(ctx) + if err != nil { + return "", errors.Wrapf(err, "error getting platform information %#v", ctx) } - - for _, d := range index.Manifests { - if d.Platform != nil && d.Platform.Architecture == wantedArch && d.Platform.OS == wantedOS { - return d.Digest, nil + for _, wantedPlatform := range wantedPlatforms { + for _, d := range index.Manifests { + imagePlatform := imgspecv1.Platform{ + Architecture: d.Platform.Architecture, + OS: d.Platform.OS, + OSVersion: d.Platform.OSVersion, + OSFeatures: dupStringSlice(d.Platform.OSFeatures), + Variant: d.Platform.Variant, + } + if platform.MatchesPlatform(imagePlatform, wantedPlatform) { + return d.Digest, nil + } } } + for _, d := range index.Manifests { if d.Platform == nil { return d.Digest, nil } } - return "", fmt.Errorf("no image found in image index for architecture %s, OS %s", wantedArch, wantedOS) + return "", fmt.Errorf("no image found in image index for architecture %s, variant %s, OS %s", wantedPlatforms[0].Architecture, wantedPlatforms[0].Variant, wantedPlatforms[0].OS) } // Serialize returns the index in a blob format. 
diff --git a/vendor/github.com/containers/image/v5/openshift/openshift-copies.go b/vendor/github.com/containers/image/v5/openshift/openshift-copies.go index 585b75069..38244926f 100644 --- a/vendor/github.com/containers/image/v5/openshift/openshift-copies.go +++ b/vendor/github.com/containers/image/v5/openshift/openshift-copies.go @@ -65,7 +65,7 @@ type restConfig struct { BearerToken string // TLSClientConfig contains settings to enable transport layer security - restTLSClientConfig + TLSClientConfig restTLSClientConfig // Server should be accessed without verifying the TLS // certificate. For testing only. @@ -238,8 +238,8 @@ func getServerIdentificationPartialConfig(configAuthInfo clientcmdAuthInfo, conf // configClusterInfo holds the information identify the server provided by .kubeconfig configClientConfig := &restConfig{} - configClientConfig.CAFile = configClusterInfo.CertificateAuthority - configClientConfig.CAData = configClusterInfo.CertificateAuthorityData + configClientConfig.TLSClientConfig.CAFile = configClusterInfo.CertificateAuthority + configClientConfig.TLSClientConfig.CAData = configClusterInfo.CertificateAuthorityData configClientConfig.Insecure = configClusterInfo.InsecureSkipTLSVerify if err := mergo.MergeWithOverwrite(mergedConfig, configClientConfig); err != nil { return nil, err @@ -264,10 +264,10 @@ func getUserIdentificationPartialConfig(configAuthInfo clientcmdAuthInfo) (*rest mergedConfig.BearerToken = configAuthInfo.Token } if len(configAuthInfo.ClientCertificate) > 0 || len(configAuthInfo.ClientCertificateData) > 0 { - mergedConfig.CertFile = configAuthInfo.ClientCertificate - mergedConfig.CertData = configAuthInfo.ClientCertificateData - mergedConfig.KeyFile = configAuthInfo.ClientKey - mergedConfig.KeyData = configAuthInfo.ClientKeyData + mergedConfig.TLSClientConfig.CertFile = configAuthInfo.ClientCertificate + mergedConfig.TLSClientConfig.CertData = configAuthInfo.ClientCertificateData + mergedConfig.TLSClientConfig.KeyFile = 
configAuthInfo.ClientKey + mergedConfig.TLSClientConfig.KeyData = configAuthInfo.ClientKeyData } if len(configAuthInfo.Username) > 0 || len(configAuthInfo.Password) > 0 { mergedConfig.Username = configAuthInfo.Username @@ -806,8 +806,8 @@ func defaultServerURL(host string, defaultTLS bool) (*url.URL, error) { func defaultServerURLFor(config *restConfig) (*url.URL, error) { // TODO: move the default to secure when the apiserver supports TLS by default // config.Insecure is taken to mean "I want HTTPS but don't bother checking the certs against a CA." - hasCA := len(config.CAFile) != 0 || len(config.CAData) != 0 - hasCert := len(config.CertFile) != 0 || len(config.CertData) != 0 + hasCA := len(config.TLSClientConfig.CAFile) != 0 || len(config.TLSClientConfig.CAData) != 0 + hasCert := len(config.TLSClientConfig.CertFile) != 0 || len(config.TLSClientConfig.CertData) != 0 defaultTLS := hasCA || hasCert || config.Insecure host := config.Host if host == "" { @@ -968,11 +968,11 @@ func tlsConfigFor(c *restConfig) (*tls.Config, error) { } if c.HasCA() { - tlsConfig.RootCAs = rootCertPool(c.CAData) + tlsConfig.RootCAs = rootCertPool(c.TLSClientConfig.CAData) } if c.HasCertAuth() { - cert, err := tls.X509KeyPair(c.CertData, c.KeyData) + cert, err := tls.X509KeyPair(c.TLSClientConfig.CertData, c.TLSClientConfig.KeyData) if err != nil { return nil, err } @@ -988,17 +988,17 @@ func tlsConfigFor(c *restConfig) (*tls.Config, error) { // either populated or were empty to start. 
func loadTLSFiles(c *restConfig) error { var err error - c.CAData, err = dataFromSliceOrFile(c.CAData, c.CAFile) + c.TLSClientConfig.CAData, err = dataFromSliceOrFile(c.TLSClientConfig.CAData, c.TLSClientConfig.CAFile) if err != nil { return err } - c.CertData, err = dataFromSliceOrFile(c.CertData, c.CertFile) + c.TLSClientConfig.CertData, err = dataFromSliceOrFile(c.TLSClientConfig.CertData, c.TLSClientConfig.CertFile) if err != nil { return err } - c.KeyData, err = dataFromSliceOrFile(c.KeyData, c.KeyFile) + c.TLSClientConfig.KeyData, err = dataFromSliceOrFile(c.TLSClientConfig.KeyData, c.TLSClientConfig.KeyFile) if err != nil { return err } @@ -1042,13 +1042,13 @@ func rootCertPool(caData []byte) *x509.CertPool { // HasCA is a modified copy of k8s.io/kubernetes/pkg/client/transport.Config.HasCA. // HasCA returns whether the configuration has a certificate authority or not. func (c *restConfig) HasCA() bool { - return len(c.CAData) > 0 || len(c.CAFile) > 0 + return len(c.TLSClientConfig.CAData) > 0 || len(c.TLSClientConfig.CAFile) > 0 } // HasCertAuth is a modified copy of k8s.io/kubernetes/pkg/client/transport.Config.HasCertAuth. // HasCertAuth returns whether the configuration has certificate authentication or not. func (c *restConfig) HasCertAuth() bool { - return len(c.CertData) != 0 || len(c.CertFile) != 0 + return len(c.TLSClientConfig.CertData) != 0 || len(c.TLSClientConfig.CertFile) != 0 } // clientcmdConfig is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api.Config. 
diff --git a/vendor/github.com/containers/image/v5/pkg/blobinfocache/boltdb/boltdb.go b/vendor/github.com/containers/image/v5/pkg/blobinfocache/boltdb/boltdb.go index 9c9a17a58..200dab593 100644 --- a/vendor/github.com/containers/image/v5/pkg/blobinfocache/boltdb/boltdb.go +++ b/vendor/github.com/containers/image/v5/pkg/blobinfocache/boltdb/boltdb.go @@ -9,9 +9,9 @@ import ( "github.com/containers/image/v5/pkg/blobinfocache/internal/prioritize" "github.com/containers/image/v5/types" - bolt "github.com/etcd-io/bbolt" "github.com/opencontainers/go-digest" "github.com/sirupsen/logrus" + bolt "go.etcd.io/bbolt" ) var ( diff --git a/vendor/github.com/containers/image/v5/pkg/docker/config/config.go b/vendor/github.com/containers/image/v5/pkg/docker/config/config.go index b7dddd0d6..dae3eb586 100644 --- a/vendor/github.com/containers/image/v5/pkg/docker/config/config.go +++ b/vendor/github.com/containers/image/v5/pkg/docker/config/config.go @@ -18,7 +18,8 @@ import ( ) type dockerAuthConfig struct { - Auth string `json:"auth,omitempty"` + Auth string `json:"auth,omitempty"` + IdentityToken string `json:"identitytoken,omitempty"` } type dockerConfigFile struct { @@ -72,20 +73,23 @@ func SetAuthentication(sys *types.SystemContext, registry, username, password st }) } -// GetAuthentication returns the registry credentials stored in -// either auth.json file or .docker/config.json -// If an entry is not found empty strings are returned for the username and password -func GetAuthentication(sys *types.SystemContext, registry string) (string, string, error) { +// GetCredentials returns the registry credentials stored in either auth.json +// file or .docker/config.json, including support for OAuth2 and IdentityToken. +// If an entry is not found, an empty struct is returned. 
+func GetCredentials(sys *types.SystemContext, registry string) (types.DockerAuthConfig, error) { if sys != nil && sys.DockerAuthConfig != nil { logrus.Debug("Returning credentials from DockerAuthConfig") - return sys.DockerAuthConfig.Username, sys.DockerAuthConfig.Password, nil + return *sys.DockerAuthConfig, nil } if enableKeyring { username, password, err := getAuthFromKernelKeyring(registry) if err == nil { logrus.Debug("returning credentials from kernel keyring") - return username, password, nil + return types.DockerAuthConfig{ + Username: username, + Password: password, + }, nil } } @@ -104,18 +108,39 @@ func GetAuthentication(sys *types.SystemContext, registry string) (string, strin authPath{path: filepath.Join(homedir.Get(), dockerLegacyHomePath), legacyFormat: true}) for _, path := range paths { - username, password, err := findAuthentication(registry, path.path, path.legacyFormat) + authConfig, err := findAuthentication(registry, path.path, path.legacyFormat) if err != nil { logrus.Debugf("Credentials not found") - return "", "", err + return types.DockerAuthConfig{}, err } - if username != "" && password != "" { + + if (authConfig.Username != "" && authConfig.Password != "") || authConfig.IdentityToken != "" { logrus.Debugf("Returning credentials from %s", path.path) - return username, password, nil + return authConfig, nil } } + logrus.Debugf("Credentials not found") - return "", "", nil + return types.DockerAuthConfig{}, nil +} + +// GetAuthentication returns the registry credentials stored in +// either auth.json file or .docker/config.json +// If an entry is not found empty strings are returned for the username and password +// +// Deprecated: This API only has support for username and password. To get the +// support for oauth2 in docker registry authentication, we added the new +// GetCredentials API. The new API should be used and this API is kept to +// maintain backward compatibility. 
+func GetAuthentication(sys *types.SystemContext, registry string) (string, string, error) { + auth, err := GetCredentials(sys, registry) + if err != nil { + return "", "", err + } + if auth.IdentityToken != "" { + return "", "", errors.Wrap(ErrNotSupported, "non-empty identity token found and this API doesn't support it") + } + return auth.Username, auth.Password, nil } // RemoveAuthentication deletes the credentials stored in auth.json @@ -294,20 +319,28 @@ func deleteAuthFromCredHelper(credHelper, registry string) error { } // findAuthentication looks for auth of registry in path -func findAuthentication(registry, path string, legacyFormat bool) (string, string, error) { +func findAuthentication(registry, path string, legacyFormat bool) (types.DockerAuthConfig, error) { auths, err := readJSONFile(path, legacyFormat) if err != nil { - return "", "", errors.Wrapf(err, "error reading JSON file %q", path) + return types.DockerAuthConfig{}, errors.Wrapf(err, "error reading JSON file %q", path) } // First try cred helpers. They should always be normalized. 
if ch, exists := auths.CredHelpers[registry]; exists { - return getAuthFromCredHelper(ch, registry) + username, password, err := getAuthFromCredHelper(ch, registry) + if err != nil { + return types.DockerAuthConfig{}, err + } + + return types.DockerAuthConfig{ + Username: username, + Password: password, + }, nil } // I'm feeling lucky if val, exists := auths.AuthConfigs[registry]; exists { - return decodeDockerAuth(val.Auth) + return decodeDockerAuth(val) } // bad luck; let's normalize the entries first @@ -316,25 +349,35 @@ func findAuthentication(registry, path string, legacyFormat bool) (string, strin for k, v := range auths.AuthConfigs { normalizedAuths[normalizeRegistry(k)] = v } + if val, exists := normalizedAuths[registry]; exists { - return decodeDockerAuth(val.Auth) + return decodeDockerAuth(val) } - return "", "", nil + + return types.DockerAuthConfig{}, nil } -func decodeDockerAuth(s string) (string, string, error) { - decoded, err := base64.StdEncoding.DecodeString(s) +// decodeDockerAuth decodes the username and password, which is +// encoded in base64. 
+func decodeDockerAuth(conf dockerAuthConfig) (types.DockerAuthConfig, error) { + decoded, err := base64.StdEncoding.DecodeString(conf.Auth) if err != nil { - return "", "", err + return types.DockerAuthConfig{}, err } + parts := strings.SplitN(string(decoded), ":", 2) if len(parts) != 2 { // if it's invalid just skip, as docker does - return "", "", nil + return types.DockerAuthConfig{}, nil } + user := parts[0] password := strings.Trim(parts[1], "\x00") - return user, password, nil + return types.DockerAuthConfig{ + Username: user, + Password: password, + IdentityToken: conf.IdentityToken, + }, nil } // convertToHostname converts a registry url which has http|https prepended diff --git a/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/system_registries_v2.go b/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/system_registries_v2.go index 60d67dfdc..8ecb47de4 100644 --- a/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/system_registries_v2.go +++ b/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/system_registries_v2.go @@ -2,16 +2,17 @@ package sysregistriesv2 import ( "fmt" - "io/ioutil" "os" "path/filepath" "regexp" + "sort" "strings" "sync" "github.com/BurntSushi/toml" "github.com/containers/image/v5/docker/reference" "github.com/containers/image/v5/types" + "github.com/containers/storage/pkg/homedir" "github.com/pkg/errors" "github.com/sirupsen/logrus" ) @@ -26,6 +27,16 @@ var systemRegistriesConfPath = builtinRegistriesConfPath // DO NOT change this, instead see systemRegistriesConfPath above. const builtinRegistriesConfPath = "/etc/containers/registries.conf" +// systemRegistriesConfDirPath is the path to the system-wide registry +// configuration directory and is used to add/subtract potential registries for +// obtaining images. 
You can override this at build time with
+// -ldflags '-X github.com/containers/image/sysregistries.systemRegistriesConfDirPath=$your_path'
+var systemRegistriesConfDirPath = builtinRegistriesConfDirPath
+
+// builtinRegistriesConfDirPath is the path to the registry configuration directory.
+// DO NOT change this, instead see systemRegistriesConfDirPath above.
+const builtinRegistriesConfDirPath = "/etc/containers/registries.conf.d"
+
 // Endpoint describes a remote location of a registry.
 type Endpoint struct {
 	// The endpoint's remote location.
@@ -35,6 +46,12 @@ type Endpoint struct {
 	Insecure bool `toml:"insecure,omitempty"`
 }
 
+// userRegistriesFile is the path to the per user registry configuration file.
+var userRegistriesFile = filepath.FromSlash(".config/containers/registries.conf")
+
+// userRegistriesDir is the path to the per user registry configuration directory.
+var userRegistriesDir = filepath.FromSlash(".config/containers/registries.conf.d")
+
 // rewriteReference will substitute the provided reference `prefix` to the
 // endpoints `location` from the `ref` and creates a new named reference from it.
 // The function errors if the newly created reference is not parsable.
@@ -49,7 +66,7 @@ func (e *Endpoint) rewriteReference(ref reference.Named, prefix string) (referen
 	if err != nil {
 		return nil, errors.Wrapf(err, "error rewriting reference")
 	}
-	logrus.Debugf("reference rewritten from '%v' to '%v'", refString, newParsedRef.String())
+
 	return newParsedRef, nil
 }
 
@@ -302,29 +319,83 @@ func (config *V2RegistriesConf) postProcess() error {
 		config.UnqualifiedSearchRegistries[i] = registry
 	}
 
+	// Registries are ordered and the first longest prefix always wins,
+	// rendering later items with the same prefix non-existent. We cannot error
+	// out anymore as this might break existing users, so let's just ignore them
+	// to guarantee that the same prefix exists only once.
+ knownPrefixes := make(map[string]bool) + uniqueRegistries := []Registry{} + for i := range config.Registries { + // TODO: should we warn if we see the same prefix being used multiple times? + if _, exists := knownPrefixes[config.Registries[i].Prefix]; !exists { + knownPrefixes[config.Registries[i].Prefix] = true + uniqueRegistries = append(uniqueRegistries, config.Registries[i]) + } + } + config.Registries = uniqueRegistries + return nil } // ConfigPath returns the path to the system-wide registry configuration file. func ConfigPath(ctx *types.SystemContext) string { - confPath := systemRegistriesConfPath - if ctx != nil { - if ctx.SystemRegistriesConfPath != "" { - confPath = ctx.SystemRegistriesConfPath - } else if ctx.RootForImplicitAbsolutePaths != "" { - confPath = filepath.Join(ctx.RootForImplicitAbsolutePaths, systemRegistriesConfPath) - } + if ctx != nil && ctx.SystemRegistriesConfPath != "" { + return ctx.SystemRegistriesConfPath + } + + userRegistriesFilePath := filepath.Join(homedir.Get(), userRegistriesFile) + if _, err := os.Stat(userRegistriesFilePath); err == nil { + return userRegistriesFilePath + } + + if ctx != nil && ctx.RootForImplicitAbsolutePaths != "" { + return filepath.Join(ctx.RootForImplicitAbsolutePaths, systemRegistriesConfPath) + } + + return systemRegistriesConfPath +} + +// ConfigDirPath returns the path to the system-wide directory for drop-in +// registry configuration files. 
+func ConfigDirPath(ctx *types.SystemContext) string { + if ctx != nil && ctx.SystemRegistriesConfDirPath != "" { + return ctx.SystemRegistriesConfDirPath + } + + userRegistriesDirPath := filepath.Join(homedir.Get(), userRegistriesDir) + if _, err := os.Stat(userRegistriesDirPath); err == nil { + return userRegistriesDirPath + } + + if ctx != nil && ctx.RootForImplicitAbsolutePaths != "" { + return filepath.Join(ctx.RootForImplicitAbsolutePaths, systemRegistriesConfDirPath) + } + + return systemRegistriesConfDirPath +} + +// configWrapper is used to store the paths from ConfigPath and ConfigDirPath +// and acts as a key to the internal cache. +type configWrapper struct { + configPath string + configDirPath string +} + +// newConfigWrapper returns a configWrapper for the specified SystemContext. +func newConfigWrapper(ctx *types.SystemContext) configWrapper { + return configWrapper{ + configPath: ConfigPath(ctx), + configDirPath: ConfigDirPath(ctx), } - return confPath } // configMutex is used to synchronize concurrent accesses to configCache. var configMutex = sync.Mutex{} // configCache caches already loaded configs with config paths as keys and is -// used to avoid redudantly parsing configs. Concurrent accesses to the cache +// used to avoid redundantly parsing configs. Concurrent accesses to the cache // are synchronized via configMutex. -var configCache = make(map[string]*V2RegistriesConf) +var configCache = make(map[configWrapper]*V2RegistriesConf) // InvalidateCache invalidates the registry cache. This function is meant to be // used for long-running processes that need to reload potential changes made to @@ -332,66 +403,108 @@ var configCache = make(map[string]*V2RegistriesConf) func InvalidateCache() { configMutex.Lock() defer configMutex.Unlock() - configCache = make(map[string]*V2RegistriesConf) + configCache = make(map[configWrapper]*V2RegistriesConf) } // getConfig returns the config object corresponding to ctx, loading it if it is not yet cached. 
func getConfig(ctx *types.SystemContext) (*V2RegistriesConf, error) { - configPath := ConfigPath(ctx) - + wrapper := newConfigWrapper(ctx) configMutex.Lock() - // if the config has already been loaded, return the cached registries - if config, inCache := configCache[configPath]; inCache { + if config, inCache := configCache[wrapper]; inCache { configMutex.Unlock() return config, nil } configMutex.Unlock() - return TryUpdatingCache(ctx) + return tryUpdatingCache(ctx, wrapper) +} + +// dropInConfigs returns a slice of drop-in-configs from the registries.conf.d +// directory. +func dropInConfigs(wrapper configWrapper) ([]string, error) { + var configs []string + + err := filepath.Walk(wrapper.configDirPath, + // WalkFunc to read additional configs + func(path string, info os.FileInfo, err error) error { + switch { + case err != nil: + // return error (could be a permission problem) + return err + case info == nil: + // this should only happen when err != nil but let's be sure + return nil + case info.IsDir(): + if path != wrapper.configDirPath { + // make sure to not recurse into sub-directories + return filepath.SkipDir + } + // ignore directories + return nil + default: + // only add *.conf files + if strings.HasSuffix(path, ".conf") { + configs = append(configs, path) + } + return nil + } + }, + ) + + if err != nil && !os.IsNotExist(err) { + // Ignore IsNotExist errors: most systems won't have a registries.conf.d + // directory. + return nil, errors.Wrapf(err, "error reading registries.conf.d") + } + + return configs, nil } // TryUpdatingCache loads the configuration from the provided `SystemContext` // without using the internal cache. On success, the loaded configuration will // be added into the internal registry cache. 
func TryUpdatingCache(ctx *types.SystemContext) (*V2RegistriesConf, error) { - configPath := ConfigPath(ctx) + return tryUpdatingCache(ctx, newConfigWrapper(ctx)) +} +// tryUpdatingCache implements TryUpdatingCache with an additional configWrapper +// argument to avoid redundantly calculating the config paths. +func tryUpdatingCache(ctx *types.SystemContext, wrapper configWrapper) (*V2RegistriesConf, error) { configMutex.Lock() defer configMutex.Unlock() // load the config - config, err := loadRegistryConf(configPath) - if err != nil { - // Return an empty []Registry if we use the default config, - // which implies that the config path of the SystemContext - // isn't set. Note: if ctx.SystemRegistriesConfPath points to - // the default config, we will still return an error. + config := &tomlConfig{} + if err := config.loadConfig(wrapper.configPath, false); err != nil { + // Continue with an empty []Registry if we use the default config, which + // implies that the config path of the SystemContext isn't set. + // + // Note: if ctx.SystemRegistriesConfPath points to the default config, + // we will still return an error. if os.IsNotExist(err) && (ctx == nil || ctx.SystemRegistriesConfPath == "") { - return &V2RegistriesConf{Registries: []Registry{}}, nil + config = &tomlConfig{} + config.V2RegistriesConf = V2RegistriesConf{Registries: []Registry{}} + } else { + return nil, errors.Wrapf(err, "error loading registries configuration %q", wrapper.configPath) } - return nil, err } - v2Config := &config.V2RegistriesConf - - // backwards compatibility for v1 configs - if config.V1RegistriesConf.Nonempty() { - if config.V2RegistriesConf.Nonempty() { - return nil, &InvalidRegistries{s: "mixing sysregistry v1/v2 is not supported"} - } - v2, err := config.V1RegistriesConf.ConvertToV2() - if err != nil { - return nil, err + // Load the configs from the conf directory path. 
+	dinConfigs, err := dropInConfigs(wrapper)
+	if err != nil {
+		return nil, err
+	}
+	for _, path := range dinConfigs {
+		// Enforce v2 format for drop-in-configs.
+		if err := config.loadConfig(path, true); err != nil {
+			return nil, errors.Wrapf(err, "error loading drop-in registries configuration %q", path)
 		}
-		v2Config = v2
 	}
 
-	if err := v2Config.postProcess(); err != nil {
-		return nil, err
-	}
+	v2Config := &config.V2RegistriesConf
 
 	// populate the cache
-	configCache[configPath] = v2Config
+	configCache[wrapper] = v2Config
 	return v2Config, nil
 }
@@ -470,16 +583,72 @@ func FindRegistry(ctx *types.SystemContext, ref string) (*Registry, error) {
 	return nil, nil
 }
 
-// Loads the registry configuration file from the filesystem and then unmarshals
-// it. Returns the unmarshalled object.
-func loadRegistryConf(configPath string) (*tomlConfig, error) {
-	config := &tomlConfig{}
+// loadConfig loads and unmarshals the configuration at the specified path. Note
+// that v1 configs are translated into v2 and are cleared. Use forceV2 if the
+// config must be in the v2 format.
+//
+// Note that specified fields in path will replace already set fields in the
+// tomlConfig. Only the [[registry]] tables are merged by prefix.
+func (c *tomlConfig) loadConfig(path string, forceV2 bool) error {
+	logrus.Debugf("Loading registries configuration %q", path)
+
+	// Save the registries before decoding the file where they could be lost.
+	// We merge them later again.
+	registryMap := make(map[string]Registry)
+	for i := range c.Registries {
+		registryMap[c.Registries[i].Prefix] = c.Registries[i]
+	}
 
-	configBytes, err := ioutil.ReadFile(configPath)
+	// Load the tomlConfig. Note that `DecodeFile` will overwrite set fields.
+	c.Registries = nil // important to clear the memory to prevent us from overlapping fields
+	_, err := toml.DecodeFile(path, c)
 	if err != nil {
-		return nil, err
+		return err
+	}
+
+	if c.V1RegistriesConf.Nonempty() {
+		// Enforce the v2 format if requested.
+		if forceV2 {
+			return &InvalidRegistries{s: "registry must be in v2 format but is in v1"}
+		}
+
+		// Convert a v1 config into a v2 config.
+		if c.V2RegistriesConf.Nonempty() {
+			return &InvalidRegistries{s: "mixing sysregistry v1/v2 is not supported"}
+		}
+		v2, err := c.V1RegistriesConf.ConvertToV2()
+		if err != nil {
+			return err
+		}
+		c.V1RegistriesConf = V1RegistriesConf{}
+		c.V2RegistriesConf = *v2
 	}
 
-	err = toml.Unmarshal(configBytes, &config)
-	return config, err
+	// Post process registries, set the correct prefixes, sanity checks, etc.
+	if err := c.postProcess(); err != nil {
+		return err
+	}
+
+	// Merge the freshly loaded registries.
+	for i := range c.Registries {
+		registryMap[c.Registries[i].Prefix] = c.Registries[i]
+	}
+
+	// Go maps have a non-deterministic order when iterating the keys, so
+	// we dump them in a slice and sort it to enforce some order in
+	// Registries slice. Some consumers of c/image (e.g., CRI-O) log the
+	// configuration where a non-deterministic order could easily cause
+	// confusion.
+ prefixes := []string{} + for prefix := range registryMap { + prefixes = append(prefixes, prefix) + } + sort.Strings(prefixes) + + c.Registries = []Registry{} + for _, prefix := range prefixes { + c.Registries = append(c.Registries, registryMap[prefix]) + } + + return nil } diff --git a/vendor/github.com/containers/image/v5/storage/storage_reference.go b/vendor/github.com/containers/image/v5/storage/storage_reference.go index 5199fb535..394557f39 100644 --- a/vendor/github.com/containers/image/v5/storage/storage_reference.go +++ b/vendor/github.com/containers/image/v5/storage/storage_reference.go @@ -30,6 +30,14 @@ func newReference(transport storageTransport, named reference.Named, id string) if named == nil && id == "" { return nil, ErrInvalidReference } + if named != nil && reference.IsNameOnly(named) { + return nil, errors.Wrapf(ErrInvalidReference, "reference %s has neither a tag nor a digest", named.String()) + } + if id != "" { + if err := validateImageID(id); err != nil { + return nil, errors.Wrapf(ErrInvalidReference, "invalid ID value %q: %v", id, err) + } + } // We take a copy of the transport, which contains a pointer to the // store that it used for resolving this reference, so that the // transport that we'll return from Transport() won't be affected by diff --git a/vendor/github.com/containers/image/v5/storage/storage_transport.go b/vendor/github.com/containers/image/v5/storage/storage_transport.go index 62a091da4..c024bee9b 100644 --- a/vendor/github.com/containers/image/v5/storage/storage_transport.go +++ b/vendor/github.com/containers/image/v5/storage/storage_transport.go @@ -43,6 +43,8 @@ type StoreTransport interface { types.ImageTransport // SetStore sets the default store for this transport. SetStore(storage.Store) + // GetStoreIfSet returns the default store for this transport, or nil if not set/determined yet. + GetStoreIfSet() storage.Store // GetImage retrieves the image from the transport's store that's named // by the reference. 
GetImage(types.ImageReference) (*storage.Image, error) @@ -52,6 +54,9 @@ type StoreTransport interface { // ParseStoreReference parses a reference, overriding any store // specification that it may contain. ParseStoreReference(store storage.Store, reference string) (*storageReference, error) + // NewStoreReference creates a reference for (named@ID) in store. + // either of name or ID can be unset; named must not be a reference.IsNameOnly. + NewStoreReference(store storage.Store, named reference.Named, id string) (*storageReference, error) // SetDefaultUIDMap sets the default UID map to use when opening stores. SetDefaultUIDMap(idmap []idtools.IDMap) // SetDefaultGIDMap sets the default GID map to use when opening stores. @@ -82,6 +87,11 @@ func (s *storageTransport) SetStore(store storage.Store) { s.store = store } +// GetStoreIfSet returns the default store for this transport, as set using SetStore() or initialized by default, or nil if not set/determined yet. +func (s *storageTransport) GetStoreIfSet() storage.Store { + return s.store +} + // SetDefaultUIDMap sets the default UID map to use when opening stores. func (s *storageTransport) SetDefaultUIDMap(idmap []idtools.IDMap) { s.defaultUIDMap = idmap @@ -129,7 +139,7 @@ func (s storageTransport) ParseStoreReference(store storage.Store, ref string) ( // If it looks like a digest, leave it alone for now. 
if _, err := digest.Parse(possibleID); err != nil { // Otherwise… - if idSum, err := digest.Parse("sha256:" + possibleID); err == nil && idSum.Validate() == nil { + if err := validateImageID(possibleID); err == nil { id = possibleID // … it is a full ID } else if img, err := store.Image(possibleID); err == nil && img != nil && len(possibleID) >= minimumTruncatedIDLength && strings.HasPrefix(img.ID, possibleID) { // … it is a truncated version of the ID of an image that's present in local storage, @@ -167,7 +177,7 @@ func (s storageTransport) ParseStoreReference(store storage.Store, ref string) ( named = reference.TagNameOnly(named) } - result, err := newReference(storageTransport{store: store, defaultUIDMap: s.defaultUIDMap, defaultGIDMap: s.defaultGIDMap}, named, id) + result, err := s.NewStoreReference(store, named, id) if err != nil { return nil, err } @@ -175,6 +185,12 @@ func (s storageTransport) ParseStoreReference(store storage.Store, ref string) ( return result, nil } +// NewStoreReference creates a reference for (named@ID) in store. +// either of name or ID can be unset; named must not be a reference.IsNameOnly. +func (s *storageTransport) NewStoreReference(store storage.Store, named reference.Named, id string) (*storageReference, error) { + return newReference(storageTransport{store: store, defaultUIDMap: s.defaultUIDMap, defaultGIDMap: s.defaultGIDMap}, named, id) +} + func (s *storageTransport) GetStore() (storage.Store, error) { // Return the transport's previously-set store. If we don't have one // of those, initialize one now. 
@@ -342,7 +358,7 @@ func (s storageTransport) ValidatePolicyConfigurationScope(scope string) error { switch len(fields) { case 1: // name only case 2: // name:tag@ID or name[:tag]@digest - if _, idErr := digest.Parse("sha256:" + fields[1]); idErr != nil { + if idErr := validateImageID(fields[1]); idErr != nil { if _, digestErr := digest.Parse(fields[1]); digestErr != nil { return fmt.Errorf("%v is neither a valid digest(%s) nor a valid ID(%s)", fields[1], digestErr.Error(), idErr.Error()) } @@ -351,7 +367,7 @@ func (s storageTransport) ValidatePolicyConfigurationScope(scope string) error { if _, err := digest.Parse(fields[1]); err != nil { return err } - if _, err := digest.Parse("sha256:" + fields[2]); err != nil { + if err := validateImageID(fields[2]); err != nil { return err } default: // Coverage: This should never happen @@ -363,3 +379,9 @@ func (s storageTransport) ValidatePolicyConfigurationScope(scope string) error { // are few semantically invalid strings. return nil } + +// validateImageID returns nil if id is a valid (full) image ID, or an error +func validateImageID(id string) error { + _, err := digest.Parse("sha256:" + id) + return err +} diff --git a/vendor/github.com/containers/image/v5/tarball/tarball_reference.go b/vendor/github.com/containers/image/v5/tarball/tarball_reference.go index 00150c53b..23f67c49e 100644 --- a/vendor/github.com/containers/image/v5/tarball/tarball_reference.go +++ b/vendor/github.com/containers/image/v5/tarball/tarball_reference.go @@ -22,7 +22,6 @@ type ConfigUpdater interface { } type tarballReference struct { - transport types.ImageTransport config imgspecv1.Image annotations map[string]string filenames []string @@ -43,7 +42,7 @@ func (r *tarballReference) ConfigUpdate(config imgspecv1.Image, annotations map[ } func (r *tarballReference) Transport() types.ImageTransport { - return r.transport + return Transport } func (r *tarballReference) StringWithinTransport() string { diff --git 
a/vendor/github.com/containers/image/v5/tarball/tarball_transport.go b/vendor/github.com/containers/image/v5/tarball/tarball_transport.go index 113545cb7..d407c657f 100644 --- a/vendor/github.com/containers/image/v5/tarball/tarball_transport.go +++ b/vendor/github.com/containers/image/v5/tarball/tarball_transport.go @@ -48,12 +48,21 @@ func (t *tarballTransport) ParseReference(reference string) (types.ImageReferenc } f.Close() } - ref := &tarballReference{ - transport: t, - filenames: filenames, - stdin: stdin, + return NewReference(filenames, stdin) +} + +// NewReference creates a new "tarball:" reference for the listed fileNames. +// If any of the fileNames is "-", the contents of stdin are used instead. +func NewReference(fileNames []string, stdin []byte) (types.ImageReference, error) { + for _, path := range fileNames { + if strings.Contains(path, separator) { + return nil, fmt.Errorf("Invalid path %q: paths including the separator %q are not supported", path, separator) + } } - return ref, nil + return &tarballReference{ + filenames: fileNames, + stdin: stdin, + }, nil } func (t *tarballTransport) ValidatePolicyConfigurationScope(scope string) error { diff --git a/vendor/github.com/containers/image/v5/types/types.go b/vendor/github.com/containers/image/v5/types/types.go index ba249ca25..40556d007 100644 --- a/vendor/github.com/containers/image/v5/types/types.go +++ b/vendor/github.com/containers/image/v5/types/types.go @@ -399,6 +399,10 @@ type Image interface { // This does not change the state of the original Image object. UpdatedImage(ctx context.Context, options ManifestUpdateOptions) (Image, error) // SupportsEncryption returns an indicator that the image supports encryption + // + // Deprecated: Initially used to determine if a manifest can be copied from a source manifest type since + // the process of updating a manifest between different manifest types was to update then convert. + // This resulted in some fields in the update being lost. 
This has been fixed by: https://github.com/containers/image/pull/836 SupportsEncryption(ctx context.Context) bool // Size returns an approximation of the amount of disk space which is consumed by the image in its current // location. If the size is not known, -1 will be returned. @@ -450,6 +454,11 @@ type ImageInspectInfo struct { type DockerAuthConfig struct { Username string Password string + // IdentityToken can be used as an refresh_token in place of username and + // password to obtain the bearer/access token in oauth2 flow. If identity + // token is set, password should not be set. + // Ref: https://docs.docker.com/registry/spec/auth/oauth/ + IdentityToken string } // OptionalBool is a boolean with an additional undefined value, which is meant @@ -497,6 +506,8 @@ type SystemContext struct { RegistriesDirPath string // Path to the system-wide registries configuration file SystemRegistriesConfPath string + // Path to the system-wide registries configuration directory + SystemRegistriesConfDirPath string // If not "", overrides the default path for the authentication file, but only new format files AuthFilePath string // if not "", overrides the default path for the authentication file, but with the legacy format; @@ -510,6 +521,8 @@ type SystemContext struct { ArchitectureChoice string // If not "", overrides the use of platform.GOOS when choosing an image or verifying OS match. OSChoice string + // If not "", overrides the use of detected ARM platform variant when choosing an image or verifying variant match. + VariantChoice string // If not "", overrides the system's default directory containing a blob info cache. BlobInfoCacheDir string // Additional tags when creating or copying a docker-archive. @@ -540,7 +553,10 @@ type SystemContext struct { // Allow contacting docker registries over HTTP, or HTTPS with failed TLS verification. Note that this does not affect other TLS connections. 
DockerInsecureSkipTLSVerify OptionalBool // if nil, the library tries to parse ~/.docker/config.json to retrieve credentials + // Ignored if DockerBearerRegistryToken is non-empty. DockerAuthConfig *DockerAuthConfig + // if not "", the library uses this registry token to authenticate to the registry + DockerBearerRegistryToken string // if not "", an User-Agent header is added to each request when contacting a registry. DockerRegistryUserAgent string // if true, a V1 ping attempt isn't done to give users a better error. Default is false. diff --git a/vendor/github.com/containers/image/v5/version/version.go b/vendor/github.com/containers/image/v5/version/version.go index 0fd7a4a37..4b04d56fc 100644 --- a/vendor/github.com/containers/image/v5/version/version.go +++ b/vendor/github.com/containers/image/v5/version/version.go @@ -6,7 +6,7 @@ const ( // VersionMajor is for an API incompatible changes VersionMajor = 5 // VersionMinor is for functionality in a backwards-compatible manner - VersionMinor = 2 + VersionMinor = 3 // VersionPatch is for backwards-compatible bug fixes VersionPatch = 1 diff --git a/vendor/github.com/etcd-io/bbolt/bolt_arm.go b/vendor/github.com/etcd-io/bbolt/bolt_arm.go deleted file mode 100644 index 105d27ddb..000000000 --- a/vendor/github.com/etcd-io/bbolt/bolt_arm.go +++ /dev/null @@ -1,28 +0,0 @@ -package bbolt - -import "unsafe" - -// maxMapSize represents the largest mmap size supported by Bolt. -const maxMapSize = 0x7FFFFFFF // 2GB - -// maxAllocSize is the size used when creating array pointers. -const maxAllocSize = 0xFFFFFFF - -// Are unaligned load/stores broken on this arch? -var brokenUnaligned bool - -func init() { - // Simple check to see whether this arch handles unaligned load/stores - // correctly. - - // ARM9 and older devices require load/stores to be from/to aligned - // addresses. If not, the lower 2 bits are cleared and that address is - // read in a jumbled up order. 
- - // See http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.faqs/ka15414.html - - raw := [6]byte{0xfe, 0xef, 0x11, 0x22, 0x22, 0x11} - val := *(*uint32)(unsafe.Pointer(uintptr(unsafe.Pointer(&raw)) + 2)) - - brokenUnaligned = val != 0x11222211 -} diff --git a/vendor/github.com/ulikunitz/xz/LICENSE b/vendor/github.com/ulikunitz/xz/LICENSE index 58ebdc162..d32149979 100644 --- a/vendor/github.com/ulikunitz/xz/LICENSE +++ b/vendor/github.com/ulikunitz/xz/LICENSE @@ -1,4 +1,4 @@ -Copyright (c) 2014-2016 Ulrich Kunitz +Copyright (c) 2014-2020 Ulrich Kunitz All rights reserved. Redistribution and use in source and binary forms, with or without diff --git a/vendor/github.com/ulikunitz/xz/TODO.md b/vendor/github.com/ulikunitz/xz/TODO.md index 1be3bb845..a4224ce14 100644 --- a/vendor/github.com/ulikunitz/xz/TODO.md +++ b/vendor/github.com/ulikunitz/xz/TODO.md @@ -1,5 +1,9 @@ # TODO list +## Release v0.5.x + +1. Support check flag in gxz command. + ## Release v0.6 1. Review encoder and check for lzma improvements under xz. @@ -86,6 +90,11 @@ ## Log +### 2020-02-24 + +Release v0.5.7 supports the check-ID None and fixes +[issue #27](https://github.com/ulikunitz/xz/issues/27). + ### 2019-02-20 Release v0.5.6 supports the go.mod file. diff --git a/vendor/github.com/ulikunitz/xz/bits.go b/vendor/github.com/ulikunitz/xz/bits.go index fadc1a594..364213dd9 100644 --- a/vendor/github.com/ulikunitz/xz/bits.go +++ b/vendor/github.com/ulikunitz/xz/bits.go @@ -1,4 +1,4 @@ -// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Copyright 2014-2019 Ulrich Kunitz. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. diff --git a/vendor/github.com/ulikunitz/xz/crc.go b/vendor/github.com/ulikunitz/xz/crc.go index b44dca96e..638774ada 100644 --- a/vendor/github.com/ulikunitz/xz/crc.go +++ b/vendor/github.com/ulikunitz/xz/crc.go @@ -1,4 +1,4 @@ -// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. 
+// Copyright 2014-2019 Ulrich Kunitz. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. diff --git a/vendor/github.com/ulikunitz/xz/format.go b/vendor/github.com/ulikunitz/xz/format.go index 798159c6c..edfec9a94 100644 --- a/vendor/github.com/ulikunitz/xz/format.go +++ b/vendor/github.com/ulikunitz/xz/format.go @@ -1,4 +1,4 @@ -// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Copyright 2014-2019 Ulrich Kunitz. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. @@ -46,7 +46,8 @@ const HeaderLen = 12 // Constants for the checksum methods supported by xz. const ( - CRC32 byte = 0x1 + None byte = 0x0 + CRC32 = 0x1 CRC64 = 0x4 SHA256 = 0xa ) @@ -58,7 +59,7 @@ var errInvalidFlags = errors.New("xz: invalid flags") // invalid. func verifyFlags(flags byte) error { switch flags { - case CRC32, CRC64, SHA256: + case None, CRC32, CRC64, SHA256: return nil default: return errInvalidFlags @@ -67,6 +68,7 @@ func verifyFlags(flags byte) error { // flagstrings maps flag values to strings. var flagstrings = map[byte]string{ + None: "None", CRC32: "CRC-32", CRC64: "CRC-64", SHA256: "SHA-256", @@ -85,6 +87,8 @@ func flagString(flags byte) string { // hash method encoded in flags. 
func newHashFunc(flags byte) (newHash func() hash.Hash, err error) { switch flags { + case None: + newHash = newNoneHash case CRC32: newHash = newCRC32 case CRC64: diff --git a/vendor/github.com/ulikunitz/xz/fox-check-none.xz b/vendor/github.com/ulikunitz/xz/fox-check-none.xz Binary files differnew file mode 100644 index 000000000..46043f7dc --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/fox-check-none.xz diff --git a/vendor/github.com/ulikunitz/xz/go.mod b/vendor/github.com/ulikunitz/xz/go.mod index 9e5eea2c9..330b675bd 100644 --- a/vendor/github.com/ulikunitz/xz/go.mod +++ b/vendor/github.com/ulikunitz/xz/go.mod @@ -1 +1,3 @@ module github.com/ulikunitz/xz + +go 1.12 diff --git a/vendor/github.com/ulikunitz/xz/internal/hash/cyclic_poly.go b/vendor/github.com/ulikunitz/xz/internal/hash/cyclic_poly.go index a32887872..f2861ba3f 100644 --- a/vendor/github.com/ulikunitz/xz/internal/hash/cyclic_poly.go +++ b/vendor/github.com/ulikunitz/xz/internal/hash/cyclic_poly.go @@ -1,4 +1,4 @@ -// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Copyright 2014-2019 Ulrich Kunitz. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. diff --git a/vendor/github.com/ulikunitz/xz/internal/hash/doc.go b/vendor/github.com/ulikunitz/xz/internal/hash/doc.go index f99ec2206..e28d23be4 100644 --- a/vendor/github.com/ulikunitz/xz/internal/hash/doc.go +++ b/vendor/github.com/ulikunitz/xz/internal/hash/doc.go @@ -1,4 +1,4 @@ -// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Copyright 2014-2019 Ulrich Kunitz. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
diff --git a/vendor/github.com/ulikunitz/xz/internal/hash/rabin_karp.go b/vendor/github.com/ulikunitz/xz/internal/hash/rabin_karp.go index 58635b113..b8e66d972 100644 --- a/vendor/github.com/ulikunitz/xz/internal/hash/rabin_karp.go +++ b/vendor/github.com/ulikunitz/xz/internal/hash/rabin_karp.go @@ -1,4 +1,4 @@ -// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Copyright 2014-2019 Ulrich Kunitz. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. diff --git a/vendor/github.com/ulikunitz/xz/internal/hash/roller.go b/vendor/github.com/ulikunitz/xz/internal/hash/roller.go index ab6a19ca4..34c81b38a 100644 --- a/vendor/github.com/ulikunitz/xz/internal/hash/roller.go +++ b/vendor/github.com/ulikunitz/xz/internal/hash/roller.go @@ -1,4 +1,4 @@ -// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Copyright 2014-2019 Ulrich Kunitz. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. diff --git a/vendor/github.com/ulikunitz/xz/internal/xlog/xlog.go b/vendor/github.com/ulikunitz/xz/internal/xlog/xlog.go index 0ba45e8ff..678b5a058 100644 --- a/vendor/github.com/ulikunitz/xz/internal/xlog/xlog.go +++ b/vendor/github.com/ulikunitz/xz/internal/xlog/xlog.go @@ -1,4 +1,4 @@ -// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Copyright 2014-2019 Ulrich Kunitz. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. diff --git a/vendor/github.com/ulikunitz/xz/lzma/bintree.go b/vendor/github.com/ulikunitz/xz/lzma/bintree.go index a781bd195..58d6a92a7 100644 --- a/vendor/github.com/ulikunitz/xz/lzma/bintree.go +++ b/vendor/github.com/ulikunitz/xz/lzma/bintree.go @@ -1,4 +1,4 @@ -// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Copyright 2014-2019 Ulrich Kunitz. All rights reserved. 
// Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. diff --git a/vendor/github.com/ulikunitz/xz/lzma/bitops.go b/vendor/github.com/ulikunitz/xz/lzma/bitops.go index e9bab0199..2784ec6ba 100644 --- a/vendor/github.com/ulikunitz/xz/lzma/bitops.go +++ b/vendor/github.com/ulikunitz/xz/lzma/bitops.go @@ -1,4 +1,4 @@ -// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Copyright 2014-2019 Ulrich Kunitz. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. diff --git a/vendor/github.com/ulikunitz/xz/lzma/breader.go b/vendor/github.com/ulikunitz/xz/lzma/breader.go index 5350d814f..4ad09a14e 100644 --- a/vendor/github.com/ulikunitz/xz/lzma/breader.go +++ b/vendor/github.com/ulikunitz/xz/lzma/breader.go @@ -1,4 +1,4 @@ -// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Copyright 2014-2019 Ulrich Kunitz. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. diff --git a/vendor/github.com/ulikunitz/xz/lzma/buffer.go b/vendor/github.com/ulikunitz/xz/lzma/buffer.go index 50e0b6d57..9cb7838ac 100644 --- a/vendor/github.com/ulikunitz/xz/lzma/buffer.go +++ b/vendor/github.com/ulikunitz/xz/lzma/buffer.go @@ -1,4 +1,4 @@ -// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Copyright 2014-2019 Ulrich Kunitz. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. diff --git a/vendor/github.com/ulikunitz/xz/lzma/bytewriter.go b/vendor/github.com/ulikunitz/xz/lzma/bytewriter.go index a3696ba08..290606ddc 100644 --- a/vendor/github.com/ulikunitz/xz/lzma/bytewriter.go +++ b/vendor/github.com/ulikunitz/xz/lzma/bytewriter.go @@ -1,4 +1,4 @@ -// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Copyright 2014-2019 Ulrich Kunitz. All rights reserved. 
// Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. diff --git a/vendor/github.com/ulikunitz/xz/lzma/decoder.go b/vendor/github.com/ulikunitz/xz/lzma/decoder.go index 16e14db39..e5a760a50 100644 --- a/vendor/github.com/ulikunitz/xz/lzma/decoder.go +++ b/vendor/github.com/ulikunitz/xz/lzma/decoder.go @@ -1,4 +1,4 @@ -// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Copyright 2014-2019 Ulrich Kunitz. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. diff --git a/vendor/github.com/ulikunitz/xz/lzma/decoderdict.go b/vendor/github.com/ulikunitz/xz/lzma/decoderdict.go index 564a12b83..ba06712b0 100644 --- a/vendor/github.com/ulikunitz/xz/lzma/decoderdict.go +++ b/vendor/github.com/ulikunitz/xz/lzma/decoderdict.go @@ -1,4 +1,4 @@ -// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Copyright 2014-2019 Ulrich Kunitz. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. diff --git a/vendor/github.com/ulikunitz/xz/lzma/directcodec.go b/vendor/github.com/ulikunitz/xz/lzma/directcodec.go index e08eb989f..e6e0c6ddf 100644 --- a/vendor/github.com/ulikunitz/xz/lzma/directcodec.go +++ b/vendor/github.com/ulikunitz/xz/lzma/directcodec.go @@ -1,4 +1,4 @@ -// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Copyright 2014-2019 Ulrich Kunitz. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. diff --git a/vendor/github.com/ulikunitz/xz/lzma/distcodec.go b/vendor/github.com/ulikunitz/xz/lzma/distcodec.go index b053a2dce..69871c04a 100644 --- a/vendor/github.com/ulikunitz/xz/lzma/distcodec.go +++ b/vendor/github.com/ulikunitz/xz/lzma/distcodec.go @@ -1,4 +1,4 @@ -// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Copyright 2014-2019 Ulrich Kunitz. All rights reserved. 
// Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. diff --git a/vendor/github.com/ulikunitz/xz/lzma/encoder.go b/vendor/github.com/ulikunitz/xz/lzma/encoder.go index fe1900a66..59055eb64 100644 --- a/vendor/github.com/ulikunitz/xz/lzma/encoder.go +++ b/vendor/github.com/ulikunitz/xz/lzma/encoder.go @@ -1,4 +1,4 @@ -// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Copyright 2014-2019 Ulrich Kunitz. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. diff --git a/vendor/github.com/ulikunitz/xz/lzma/encoderdict.go b/vendor/github.com/ulikunitz/xz/lzma/encoderdict.go index 9d0fbc703..40f3d3f64 100644 --- a/vendor/github.com/ulikunitz/xz/lzma/encoderdict.go +++ b/vendor/github.com/ulikunitz/xz/lzma/encoderdict.go @@ -1,4 +1,4 @@ -// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Copyright 2014-2019 Ulrich Kunitz. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. diff --git a/vendor/github.com/ulikunitz/xz/lzma/hashtable.go b/vendor/github.com/ulikunitz/xz/lzma/hashtable.go index d786a9745..e82970eac 100644 --- a/vendor/github.com/ulikunitz/xz/lzma/hashtable.go +++ b/vendor/github.com/ulikunitz/xz/lzma/hashtable.go @@ -1,4 +1,4 @@ -// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Copyright 2014-2019 Ulrich Kunitz. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. diff --git a/vendor/github.com/ulikunitz/xz/lzma/header.go b/vendor/github.com/ulikunitz/xz/lzma/header.go index bc708969f..cda39462c 100644 --- a/vendor/github.com/ulikunitz/xz/lzma/header.go +++ b/vendor/github.com/ulikunitz/xz/lzma/header.go @@ -1,4 +1,4 @@ -// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Copyright 2014-2019 Ulrich Kunitz. All rights reserved. 
// Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. diff --git a/vendor/github.com/ulikunitz/xz/lzma/header2.go b/vendor/github.com/ulikunitz/xz/lzma/header2.go index ac6a71a5a..cd148812c 100644 --- a/vendor/github.com/ulikunitz/xz/lzma/header2.go +++ b/vendor/github.com/ulikunitz/xz/lzma/header2.go @@ -1,4 +1,4 @@ -// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Copyright 2014-2019 Ulrich Kunitz. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. diff --git a/vendor/github.com/ulikunitz/xz/lzma/lengthcodec.go b/vendor/github.com/ulikunitz/xz/lzma/lengthcodec.go index e51773092..927395bd8 100644 --- a/vendor/github.com/ulikunitz/xz/lzma/lengthcodec.go +++ b/vendor/github.com/ulikunitz/xz/lzma/lengthcodec.go @@ -1,4 +1,4 @@ -// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Copyright 2014-2019 Ulrich Kunitz. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. diff --git a/vendor/github.com/ulikunitz/xz/lzma/literalcodec.go b/vendor/github.com/ulikunitz/xz/lzma/literalcodec.go index c949d6ebd..ca31530fd 100644 --- a/vendor/github.com/ulikunitz/xz/lzma/literalcodec.go +++ b/vendor/github.com/ulikunitz/xz/lzma/literalcodec.go @@ -1,4 +1,4 @@ -// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Copyright 2014-2019 Ulrich Kunitz. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. diff --git a/vendor/github.com/ulikunitz/xz/lzma/matchalgorithm.go b/vendor/github.com/ulikunitz/xz/lzma/matchalgorithm.go index 4a244eb1a..7d03ec0dc 100644 --- a/vendor/github.com/ulikunitz/xz/lzma/matchalgorithm.go +++ b/vendor/github.com/ulikunitz/xz/lzma/matchalgorithm.go @@ -1,4 +1,4 @@ -// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Copyright 2014-2019 Ulrich Kunitz. 
All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. diff --git a/vendor/github.com/ulikunitz/xz/lzma/operation.go b/vendor/github.com/ulikunitz/xz/lzma/operation.go index 733bb99da..a75c9b46c 100644 --- a/vendor/github.com/ulikunitz/xz/lzma/operation.go +++ b/vendor/github.com/ulikunitz/xz/lzma/operation.go @@ -1,4 +1,4 @@ -// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Copyright 2014-2019 Ulrich Kunitz. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. diff --git a/vendor/github.com/ulikunitz/xz/lzma/prob.go b/vendor/github.com/ulikunitz/xz/lzma/prob.go index 24d50ec68..6987a166f 100644 --- a/vendor/github.com/ulikunitz/xz/lzma/prob.go +++ b/vendor/github.com/ulikunitz/xz/lzma/prob.go @@ -1,4 +1,4 @@ -// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Copyright 2014-2019 Ulrich Kunitz. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. diff --git a/vendor/github.com/ulikunitz/xz/lzma/properties.go b/vendor/github.com/ulikunitz/xz/lzma/properties.go index 23418e25d..662feba87 100644 --- a/vendor/github.com/ulikunitz/xz/lzma/properties.go +++ b/vendor/github.com/ulikunitz/xz/lzma/properties.go @@ -1,4 +1,4 @@ -// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Copyright 2014-2019 Ulrich Kunitz. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. diff --git a/vendor/github.com/ulikunitz/xz/lzma/rangecodec.go b/vendor/github.com/ulikunitz/xz/lzma/rangecodec.go index 6361c5e7c..7189a0377 100644 --- a/vendor/github.com/ulikunitz/xz/lzma/rangecodec.go +++ b/vendor/github.com/ulikunitz/xz/lzma/rangecodec.go @@ -1,4 +1,4 @@ -// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Copyright 2014-2019 Ulrich Kunitz. All rights reserved. 
// Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. diff --git a/vendor/github.com/ulikunitz/xz/lzma/reader.go b/vendor/github.com/ulikunitz/xz/lzma/reader.go index 2ef3dcaaa..7b7eef31f 100644 --- a/vendor/github.com/ulikunitz/xz/lzma/reader.go +++ b/vendor/github.com/ulikunitz/xz/lzma/reader.go @@ -1,4 +1,4 @@ -// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Copyright 2014-2019 Ulrich Kunitz. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. diff --git a/vendor/github.com/ulikunitz/xz/lzma/reader2.go b/vendor/github.com/ulikunitz/xz/lzma/reader2.go index a55cfaa4e..33074e624 100644 --- a/vendor/github.com/ulikunitz/xz/lzma/reader2.go +++ b/vendor/github.com/ulikunitz/xz/lzma/reader2.go @@ -1,4 +1,4 @@ -// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Copyright 2014-2019 Ulrich Kunitz. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. diff --git a/vendor/github.com/ulikunitz/xz/lzma/state.go b/vendor/github.com/ulikunitz/xz/lzma/state.go index 502351052..03f061cf1 100644 --- a/vendor/github.com/ulikunitz/xz/lzma/state.go +++ b/vendor/github.com/ulikunitz/xz/lzma/state.go @@ -1,4 +1,4 @@ -// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Copyright 2014-2019 Ulrich Kunitz. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. diff --git a/vendor/github.com/ulikunitz/xz/lzma/treecodecs.go b/vendor/github.com/ulikunitz/xz/lzma/treecodecs.go index 504b3d78e..1cb3596fe 100644 --- a/vendor/github.com/ulikunitz/xz/lzma/treecodecs.go +++ b/vendor/github.com/ulikunitz/xz/lzma/treecodecs.go @@ -1,4 +1,4 @@ -// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Copyright 2014-2019 Ulrich Kunitz. All rights reserved. 
// Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. diff --git a/vendor/github.com/ulikunitz/xz/lzma/writer.go b/vendor/github.com/ulikunitz/xz/lzma/writer.go index efe34fb6b..5803ecca9 100644 --- a/vendor/github.com/ulikunitz/xz/lzma/writer.go +++ b/vendor/github.com/ulikunitz/xz/lzma/writer.go @@ -1,4 +1,4 @@ -// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Copyright 2014-2019 Ulrich Kunitz. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. diff --git a/vendor/github.com/ulikunitz/xz/lzma/writer2.go b/vendor/github.com/ulikunitz/xz/lzma/writer2.go index 7c1afe157..c263b0666 100644 --- a/vendor/github.com/ulikunitz/xz/lzma/writer2.go +++ b/vendor/github.com/ulikunitz/xz/lzma/writer2.go @@ -1,4 +1,4 @@ -// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Copyright 2014-2019 Ulrich Kunitz. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. diff --git a/vendor/github.com/ulikunitz/xz/lzmafilter.go b/vendor/github.com/ulikunitz/xz/lzmafilter.go index 69cf5f7c2..6f4aa2c09 100644 --- a/vendor/github.com/ulikunitz/xz/lzmafilter.go +++ b/vendor/github.com/ulikunitz/xz/lzmafilter.go @@ -1,4 +1,4 @@ -// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Copyright 2014-2019 Ulrich Kunitz. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. diff --git a/vendor/github.com/ulikunitz/xz/none-check.go b/vendor/github.com/ulikunitz/xz/none-check.go new file mode 100644 index 000000000..e12d8e476 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/none-check.go @@ -0,0 +1,23 @@ +// Copyright 2014-2019 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package xz + +import "hash" + +type noneHash struct{} + +func (h noneHash) Write(p []byte) (n int, err error) { return len(p), nil } + +func (h noneHash) Sum(b []byte) []byte { return b } + +func (h noneHash) Reset() {} + +func (h noneHash) Size() int { return 0 } + +func (h noneHash) BlockSize() int { return 0 } + +func newNoneHash() hash.Hash { + return &noneHash{} +} diff --git a/vendor/github.com/ulikunitz/xz/reader.go b/vendor/github.com/ulikunitz/xz/reader.go index 0634c6bcc..22cd6d500 100644 --- a/vendor/github.com/ulikunitz/xz/reader.go +++ b/vendor/github.com/ulikunitz/xz/reader.go @@ -1,4 +1,4 @@ -// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Copyright 2014-2019 Ulrich Kunitz. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. @@ -283,7 +283,11 @@ func (c *ReaderConfig) newBlockReader(xz io.Reader, h *blockHeader, if err != nil { return nil, err } - br.r = io.TeeReader(fr, br.hash) + if br.hash.Size() != 0 { + br.r = io.TeeReader(fr, br.hash) + } else { + br.r = fr + } return br, nil } diff --git a/vendor/github.com/ulikunitz/xz/writer.go b/vendor/github.com/ulikunitz/xz/writer.go index c126f7099..aec10dfa6 100644 --- a/vendor/github.com/ulikunitz/xz/writer.go +++ b/vendor/github.com/ulikunitz/xz/writer.go @@ -1,4 +1,4 @@ -// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Copyright 2014-2019 Ulrich Kunitz. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
@@ -18,8 +18,10 @@ type WriterConfig struct { DictCap int BufSize int BlockSize int64 - // checksum method: CRC32, CRC64 or SHA256 + // checksum method: CRC32, CRC64 or SHA256 (default: CRC64) CheckSum byte + // Forces NoChecksum (default: false) + NoCheckSum bool // match algorithm Matcher lzma.MatchAlgorithm } @@ -41,6 +43,9 @@ func (c *WriterConfig) fill() { if c.CheckSum == 0 { c.CheckSum = CRC64 } + if c.NoCheckSum { + c.CheckSum = None + } } // Verify checks the configuration for errors. Zero values will be @@ -284,7 +289,11 @@ func (c *WriterConfig) newBlockWriter(xz io.Writer, hash hash.Hash) (bw *blockWr if err != nil { return nil, err } - bw.mw = io.MultiWriter(bw.w, bw.hash) + if bw.hash.Size() != 0 { + bw.mw = io.MultiWriter(bw.w, bw.hash) + } else { + bw.mw = bw.w + } return bw, nil } diff --git a/vendor/github.com/vbauerster/mpb/v4/bar.go b/vendor/github.com/vbauerster/mpb/v4/bar.go index c362da739..1828e67a6 100644 --- a/vendor/github.com/vbauerster/mpb/v4/bar.go +++ b/vendor/github.com/vbauerster/mpb/v4/bar.go @@ -29,11 +29,11 @@ func (f FillerFunc) Fill(w io.Writer, width int, stat *decor.Statistics) { f(w, width, stat) } -// Wrapper interface. +// WrapFiller interface. // If you're implementing custom Filler by wrapping a built-in one, // it is necessary to implement this interface to retain functionality // of built-in Filler. -type Wrapper interface { +type WrapFiller interface { Base() Filler } diff --git a/vendor/github.com/vbauerster/mpb/v4/bar_filler.go b/vendor/github.com/vbauerster/mpb/v4/bar_filler.go index 0d751a68d..fab4aa229 100644 --- a/vendor/github.com/vbauerster/mpb/v4/bar_filler.go +++ b/vendor/github.com/vbauerster/mpb/v4/bar_filler.go @@ -18,13 +18,14 @@ const ( rRefill ) -// DefaultBarStyle is applied when bar constructed with *Progress.AddBar method. +// DefaultBarStyle is a string containing 7 runes. +// Each rune is a building block of a progress bar. 
// -// '1th rune' stands for left boundary rune +// '1st rune' stands for left boundary rune // -// '2th rune' stands for fill rune +// '2nd rune' stands for fill rune // -// '3th rune' stands for tip rune +// '3rd rune' stands for tip rune // // '4th rune' stands for empty rune // @@ -44,16 +45,16 @@ type barFiller struct { flush func(w io.Writer, bb [][]byte) } -// NewBarFiller constucts mpb.Filler, to be used with *Progress.Add method. +// NewBarFiller constucts mpb.Filler, to be used with *Progress.Add(...) *Bar method. func NewBarFiller(style string, reverse bool) Filler { if style == "" { style = DefaultBarStyle } bf := &barFiller{ - format: make([][]byte, utf8.RuneCountInString(style)), + format: make([][]byte, utf8.RuneCountInString(style)), + reverse: reverse, } bf.SetStyle(style) - bf.SetReverse(reverse) return bf } @@ -66,28 +67,16 @@ func (s *barFiller) SetStyle(style string) { src = append(src, []byte(string(r))) } copy(s.format, src) - if s.reverse { - s.tip = s.format[rRevTip] - } else { - s.tip = s.format[rTip] - } + s.SetReverse(s.reverse) } func (s *barFiller) SetReverse(reverse bool) { if reverse { s.tip = s.format[rRevTip] - s.flush = func(w io.Writer, bb [][]byte) { - for i := len(bb) - 1; i >= 0; i-- { - w.Write(bb[i]) - } - } + s.flush = reverseFlush } else { s.tip = s.format[rTip] - s.flush = func(w io.Writer, bb [][]byte) { - for i := 0; i < len(bb); i++ { - w.Write(bb[i]) - } - } + s.flush = normalFlush } s.reverse = reverse } @@ -135,3 +124,15 @@ func (s *barFiller) Fill(w io.Writer, width int, stat *decor.Statistics) { s.flush(w, bb) } + +func normalFlush(w io.Writer, bb [][]byte) { + for i := 0; i < len(bb); i++ { + w.Write(bb[i]) + } +} + +func reverseFlush(w io.Writer, bb [][]byte) { + for i := len(bb) - 1; i >= 0; i-- { + w.Write(bb[i]) + } +} diff --git a/vendor/github.com/vbauerster/mpb/v4/bar_option.go b/vendor/github.com/vbauerster/mpb/v4/bar_option.go index 7fb152562..be0c36215 100644 --- 
a/vendor/github.com/vbauerster/mpb/v4/bar_option.go +++ b/vendor/github.com/vbauerster/mpb/v4/bar_option.go @@ -199,8 +199,8 @@ func MakeFillerTypeSpecificBarOption( } } -// BarOptOnCond returns option when condition evaluates to true. -func BarOptOnCond(option BarOption, condition func() bool) BarOption { +// BarOptOn returns option when condition evaluates to true. +func BarOptOn(option BarOption, condition func() bool) BarOption { if condition() { return option } diff --git a/vendor/github.com/vbauerster/mpb/v4/decor/any.go b/vendor/github.com/vbauerster/mpb/v4/decor/any.go new file mode 100644 index 000000000..bf9cf51a5 --- /dev/null +++ b/vendor/github.com/vbauerster/mpb/v4/decor/any.go @@ -0,0 +1,21 @@ +package decor + +// Any decorator displays text, that can be changed during decorator's +// lifetime via provided func call back. +// +// `f` call back which provides string to display +// +// `wcc` optional WC config +// +func Any(f func(*Statistics) string, wcc ...WC) Decorator { + return &any{initWC(wcc...), f} +} + +type any struct { + WC + f func(*Statistics) string +} + +func (d *any) Decor(s *Statistics) string { + return d.FormatMsg(d.f(s)) +} diff --git a/vendor/github.com/vbauerster/mpb/v4/decor/counters.go b/vendor/github.com/vbauerster/mpb/v4/decor/counters.go index 32bcdf76a..297bf937b 100644 --- a/vendor/github.com/vbauerster/mpb/v4/decor/counters.go +++ b/vendor/github.com/vbauerster/mpb/v4/decor/counters.go @@ -43,24 +43,7 @@ func CountersKiloByte(pairFmt string, wcc ...WC) Decorator { // pairFmt="% d / % d" output: "1 MB / 12 MB" // func Counters(unit int, pairFmt string, wcc ...WC) Decorator { - var wc WC - for _, widthConf := range wcc { - wc = widthConf - } - d := &countersDecorator{ - WC: wc.Init(), - producer: chooseSizeProducer(unit, pairFmt), - } - return d -} - -type countersDecorator struct { - WC - producer func(*Statistics) string -} - -func (d *countersDecorator) Decor(st *Statistics) string { - return d.FormatMsg(d.producer(st)) + 
return Any(chooseSizeProducer(unit, pairFmt), wcc...) } func chooseSizeProducer(unit int, format string) func(*Statistics) string { @@ -69,16 +52,16 @@ func chooseSizeProducer(unit int, format string) func(*Statistics) string { } switch unit { case UnitKiB: - return func(st *Statistics) string { - return fmt.Sprintf(format, SizeB1024(st.Current), SizeB1024(st.Total)) + return func(s *Statistics) string { + return fmt.Sprintf(format, SizeB1024(s.Current), SizeB1024(s.Total)) } case UnitKB: - return func(st *Statistics) string { - return fmt.Sprintf(format, SizeB1000(st.Current), SizeB1000(st.Total)) + return func(s *Statistics) string { + return fmt.Sprintf(format, SizeB1000(s.Current), SizeB1000(s.Total)) } default: - return func(st *Statistics) string { - return fmt.Sprintf(format, st.Current, st.Total) + return func(s *Statistics) string { + return fmt.Sprintf(format, s.Current, s.Total) } } } diff --git a/vendor/github.com/vbauerster/mpb/v4/decor/decorator.go b/vendor/github.com/vbauerster/mpb/v4/decor/decorator.go index 2271cbbe1..01b67802c 100644 --- a/vendor/github.com/vbauerster/mpb/v4/decor/decorator.go +++ b/vendor/github.com/vbauerster/mpb/v4/decor/decorator.go @@ -176,3 +176,11 @@ func (wc *WC) GetConf() WC { func (wc *WC) SetConf(conf WC) { *wc = conf.Init() } + +func initWC(wcc ...WC) WC { + var wc WC + for _, nwc := range wcc { + wc = nwc + } + return wc.Init() +} diff --git a/vendor/github.com/vbauerster/mpb/v4/decor/elapsed.go b/vendor/github.com/vbauerster/mpb/v4/decor/elapsed.go index ac2873143..c9999a3b5 100644 --- a/vendor/github.com/vbauerster/mpb/v4/decor/elapsed.go +++ b/vendor/github.com/vbauerster/mpb/v4/decor/elapsed.go @@ -9,6 +9,7 @@ import ( // `style` one of [ET_STYLE_GO|ET_STYLE_HHMMSS|ET_STYLE_HHMM|ET_STYLE_MMSS] // // `wcc` optional WC config +// func Elapsed(style TimeStyle, wcc ...WC) Decorator { return NewElapsed(style, time.Now(), wcc...) 
} @@ -20,29 +21,15 @@ func Elapsed(style TimeStyle, wcc ...WC) Decorator { // `startTime` start time // // `wcc` optional WC config +// func NewElapsed(style TimeStyle, startTime time.Time, wcc ...WC) Decorator { - var wc WC - for _, widthConf := range wcc { - wc = widthConf - } - d := &elapsedDecorator{ - WC: wc.Init(), - startTime: startTime, - producer: chooseTimeProducer(style), - } - return d -} - -type elapsedDecorator struct { - WC - startTime time.Time - producer func(time.Duration) string - msg string -} - -func (d *elapsedDecorator) Decor(st *Statistics) string { - if !st.Completed { - d.msg = d.producer(time.Since(d.startTime)) + var msg string + producer := chooseTimeProducer(style) + f := func(s *Statistics) string { + if !s.Completed { + msg = producer(time.Since(startTime)) + } + return msg } - return d.FormatMsg(d.msg) + return Any(f, wcc...) } diff --git a/vendor/github.com/vbauerster/mpb/v4/decor/eta.go b/vendor/github.com/vbauerster/mpb/v4/decor/eta.go index 818cded17..e875e96fa 100644 --- a/vendor/github.com/vbauerster/mpb/v4/decor/eta.go +++ b/vendor/github.com/vbauerster/mpb/v4/decor/eta.go @@ -33,7 +33,7 @@ func EwmaETA(style TimeStyle, age float64, wcc ...WC) Decorator { } else { average = ewma.NewMovingAverage(age) } - return MovingAverageETA(style, average, nil, wcc...) + return MovingAverageETA(style, NewThreadSafeMovingAverage(average), nil, wcc...) } // MovingAverageETA decorator relies on MovingAverage implementation to calculate its average. 
@@ -45,13 +45,10 @@ func EwmaETA(style TimeStyle, age float64, wcc ...WC) Decorator { // `normalizer` available implementations are [FixedIntervalTimeNormalizer|MaxTolerateTimeNormalizer] // // `wcc` optional WC config +// func MovingAverageETA(style TimeStyle, average MovingAverage, normalizer TimeNormalizer, wcc ...WC) Decorator { - var wc WC - for _, widthConf := range wcc { - wc = widthConf - } d := &movingAverageETA{ - WC: wc.Init(), + WC: initWC(wcc...), average: average, normalizer: normalizer, producer: chooseTimeProducer(style), @@ -66,9 +63,9 @@ type movingAverageETA struct { producer func(time.Duration) string } -func (d *movingAverageETA) Decor(st *Statistics) string { +func (d *movingAverageETA) Decor(s *Statistics) string { v := math.Round(d.average.Value()) - remaining := time.Duration((st.Total - st.Current) * int64(v)) + remaining := time.Duration((s.Total - s.Current) * int64(v)) if d.normalizer != nil { remaining = d.normalizer.Normalize(remaining) } @@ -92,6 +89,7 @@ func (d *movingAverageETA) NextAmount(n int64, wdd ...time.Duration) { // `style` one of [ET_STYLE_GO|ET_STYLE_HHMMSS|ET_STYLE_HHMM|ET_STYLE_MMSS] // // `wcc` optional WC config +// func AverageETA(style TimeStyle, wcc ...WC) Decorator { return NewAverageETA(style, time.Now(), nil, wcc...) 
} @@ -105,13 +103,10 @@ func AverageETA(style TimeStyle, wcc ...WC) Decorator { // `normalizer` available implementations are [FixedIntervalTimeNormalizer|MaxTolerateTimeNormalizer] // // `wcc` optional WC config +// func NewAverageETA(style TimeStyle, startTime time.Time, normalizer TimeNormalizer, wcc ...WC) Decorator { - var wc WC - for _, widthConf := range wcc { - wc = widthConf - } d := &averageETA{ - WC: wc.Init(), + WC: initWC(wcc...), startTime: startTime, normalizer: normalizer, producer: chooseTimeProducer(style), @@ -126,12 +121,12 @@ type averageETA struct { producer func(time.Duration) string } -func (d *averageETA) Decor(st *Statistics) string { +func (d *averageETA) Decor(s *Statistics) string { var remaining time.Duration - if st.Current != 0 { - durPerItem := float64(time.Since(d.startTime)) / float64(st.Current) + if s.Current != 0 { + durPerItem := float64(time.Since(d.startTime)) / float64(s.Current) durPerItem = math.Round(durPerItem) - remaining = time.Duration((st.Total - st.Current) * int64(durPerItem)) + remaining = time.Duration((s.Total - s.Current) * int64(durPerItem)) if d.normalizer != nil { remaining = d.normalizer.Normalize(remaining) } diff --git a/vendor/github.com/vbauerster/mpb/v4/decor/merge.go b/vendor/github.com/vbauerster/mpb/v4/decor/merge.go index 723869209..520f13a7f 100644 --- a/vendor/github.com/vbauerster/mpb/v4/decor/merge.go +++ b/vendor/github.com/vbauerster/mpb/v4/decor/merge.go @@ -64,8 +64,8 @@ func (d *mergeDecorator) Base() Decorator { return d.Decorator } -func (d *mergeDecorator) Decor(st *Statistics) string { - msg := d.Decorator.Decor(st) +func (d *mergeDecorator) Decor(s *Statistics) string { + msg := d.Decorator.Decor(s) msgLen := utf8.RuneCountInString(msg) if (d.wc.C & DextraSpace) != 0 { msgLen++ @@ -101,6 +101,6 @@ type placeHolderDecorator struct { WC } -func (d *placeHolderDecorator) Decor(_ *Statistics) string { +func (d *placeHolderDecorator) Decor(*Statistics) string { return "" } diff --git 
a/vendor/github.com/vbauerster/mpb/v4/decor/moving_average.go b/vendor/github.com/vbauerster/mpb/v4/decor/moving_average.go index 933b1f2cd..6acdb4ace 100644 --- a/vendor/github.com/vbauerster/mpb/v4/decor/moving_average.go +++ b/vendor/github.com/vbauerster/mpb/v4/decor/moving_average.go @@ -2,6 +2,7 @@ package decor import ( "sort" + "sync" "github.com/VividCortex/ewma" ) @@ -11,6 +12,38 @@ import ( // or exponentially decaying. type MovingAverage = ewma.MovingAverage +type threadSafeMovingAverage struct { + ewma.MovingAverage + mu sync.Mutex +} + +func (s *threadSafeMovingAverage) Add(value float64) { + s.mu.Lock() + s.MovingAverage.Add(value) + s.mu.Unlock() +} + +func (s *threadSafeMovingAverage) Value() float64 { + s.mu.Lock() + defer s.mu.Unlock() + return s.MovingAverage.Value() +} + +func (s *threadSafeMovingAverage) Set(value float64) { + s.mu.Lock() + s.MovingAverage.Set(value) + s.mu.Unlock() +} + +// NewThreadSafeMovingAverage converts provided ewma.MovingAverage +// into thread safe ewma.MovingAverage. +func NewThreadSafeMovingAverage(average ewma.MovingAverage) ewma.MovingAverage { + if tsma, ok := average.(*threadSafeMovingAverage); ok { + return tsma + } + return &threadSafeMovingAverage{MovingAverage: average} +} + type medianWindow [3]float64 func (s *medianWindow) Len() int { return len(s) } @@ -36,5 +69,5 @@ func (s *medianWindow) Set(value float64) { // NewMedian is fixed last 3 samples median MovingAverage. func NewMedian() MovingAverage { - return new(medianWindow) + return NewThreadSafeMovingAverage(new(medianWindow)) } diff --git a/vendor/github.com/vbauerster/mpb/v4/decor/name.go b/vendor/github.com/vbauerster/mpb/v4/decor/name.go index 2d5865f6c..a7d477e07 100644 --- a/vendor/github.com/vbauerster/mpb/v4/decor/name.go +++ b/vendor/github.com/vbauerster/mpb/v4/decor/name.go @@ -1,27 +1,12 @@ package decor -// Name returns name decorator. 
+// Name decorator displays text that is set once and can't be changed +// during decorator's lifetime. // -// `name` string to display +// `str` string to display // // `wcc` optional WC config -func Name(name string, wcc ...WC) Decorator { - var wc WC - for _, widthConf := range wcc { - wc = widthConf - } - d := &nameDecorator{ - WC: wc.Init(), - msg: name, - } - return d -} - -type nameDecorator struct { - WC - msg string -} - -func (d *nameDecorator) Decor(st *Statistics) string { - return d.FormatMsg(d.msg) +// +func Name(str string, wcc ...WC) Decorator { + return Any(func(*Statistics) string { return str }, wcc...) } diff --git a/vendor/github.com/vbauerster/mpb/v4/decor/on_complete.go b/vendor/github.com/vbauerster/mpb/v4/decor/on_complete.go index 714a0ded3..0a1526bf5 100644 --- a/vendor/github.com/vbauerster/mpb/v4/decor/on_complete.go +++ b/vendor/github.com/vbauerster/mpb/v4/decor/on_complete.go @@ -6,6 +6,7 @@ package decor // `decorator` Decorator to wrap // // `message` message to display on complete event +// func OnComplete(decorator Decorator, message string) Decorator { d := &onCompleteWrapper{ Decorator: decorator, @@ -23,12 +24,12 @@ type onCompleteWrapper struct { msg string } -func (d *onCompleteWrapper) Decor(st *Statistics) string { - if st.Completed { +func (d *onCompleteWrapper) Decor(s *Statistics) string { + if s.Completed { wc := d.GetConf() return wc.FormatMsg(d.msg) } - return d.Decorator.Decor(st) + return d.Decorator.Decor(s) } func (d *onCompleteWrapper) Base() Decorator { diff --git a/vendor/github.com/vbauerster/mpb/v4/decor/percentage.go b/vendor/github.com/vbauerster/mpb/v4/decor/percentage.go index abf343a35..efb2f3ef5 100644 --- a/vendor/github.com/vbauerster/mpb/v4/decor/percentage.go +++ b/vendor/github.com/vbauerster/mpb/v4/decor/percentage.go @@ -37,36 +37,22 @@ func Percentage(wcc ...WC) Decorator { return NewPercentage("% d", wcc...) } -// NewPercentage percentage decorator with custom fmt string. 
+// NewPercentage percentage decorator with custom format string. // -// fmt examples: +// format examples: // -// fmt="%.1f" output: "1.0%" -// fmt="% .1f" output: "1.0 %" -// fmt="%d" output: "1%" -// fmt="% d" output: "1 %" +// format="%.1f" output: "1.0%" +// format="% .1f" output: "1.0 %" +// format="%d" output: "1%" +// format="% d" output: "1 %" // -func NewPercentage(fmt string, wcc ...WC) Decorator { - var wc WC - for _, widthConf := range wcc { - wc = widthConf +func NewPercentage(format string, wcc ...WC) Decorator { + if format == "" { + format = "% d" } - if fmt == "" { - fmt = "% d" + f := func(s *Statistics) string { + p := internal.Percentage(s.Total, s.Current, 100) + return fmt.Sprintf(format, percentageType(p)) } - d := &percentageDecorator{ - WC: wc.Init(), - fmt: fmt, - } - return d -} - -type percentageDecorator struct { - WC - fmt string -} - -func (d *percentageDecorator) Decor(st *Statistics) string { - p := internal.Percentage(st.Total, st.Current, 100) - return d.FormatMsg(fmt.Sprintf(d.fmt, percentageType(p))) + return Any(f, wcc...) } diff --git a/vendor/github.com/vbauerster/mpb/v4/decor/speed.go b/vendor/github.com/vbauerster/mpb/v4/decor/speed.go index 795a5536f..93f5763e1 100644 --- a/vendor/github.com/vbauerster/mpb/v4/decor/speed.go +++ b/vendor/github.com/vbauerster/mpb/v4/decor/speed.go @@ -9,12 +9,20 @@ import ( "github.com/VividCortex/ewma" ) -// SpeedFormatter is wrapper for SizeB1024 and SizeB1000 to format value as speed/s. -type SpeedFormatter struct { +// FmtAsSpeed adds "/s" to the end of the input formatter. 
To be +// used with SizeB1000 or SizeB1024 types, for example: +// +// fmt.Printf("%.1f", FmtAsSpeed(SizeB1024(2048))) +// +func FmtAsSpeed(input fmt.Formatter) fmt.Formatter { + return &speedFormatter{input} +} + +type speedFormatter struct { fmt.Formatter } -func (self *SpeedFormatter) Format(st fmt.State, verb rune) { +func (self *speedFormatter) Format(st fmt.State, verb rune) { self.Formatter.Format(st, verb) io.WriteString(st, "/s") } @@ -30,7 +38,7 @@ func EwmaSpeed(unit int, format string, age float64, wcc ...WC) Decorator { } else { average = ewma.NewMovingAverage(age) } - return MovingAverageSpeed(unit, format, average, wcc...) + return MovingAverageSpeed(unit, format, NewThreadSafeMovingAverage(average), wcc...) } // MovingAverageSpeed decorator relies on MovingAverage implementation @@ -52,15 +60,11 @@ func EwmaSpeed(unit int, format string, age float64, wcc ...WC) Decorator { // unit=UnitKB, format="% .1f" output: "1.0 MB/s" // func MovingAverageSpeed(unit int, format string, average MovingAverage, wcc ...WC) Decorator { - var wc WC - for _, widthConf := range wcc { - wc = widthConf - } if format == "" { format = "%.0f" } d := &movingAverageSpeed{ - WC: wc.Init(), + WC: initWC(wcc...), average: average, producer: chooseSpeedProducer(unit, format), } @@ -74,8 +78,8 @@ type movingAverageSpeed struct { msg string } -func (d *movingAverageSpeed) Decor(st *Statistics) string { - if !st.Completed { +func (d *movingAverageSpeed) Decor(s *Statistics) string { + if !s.Completed { var speed float64 if v := d.average.Value(); v > 0 { speed = 1 / v @@ -122,15 +126,11 @@ func AverageSpeed(unit int, format string, wcc ...WC) Decorator { // unit=UnitKB, format="% .1f" output: "1.0 MB/s" // func NewAverageSpeed(unit int, format string, startTime time.Time, wcc ...WC) Decorator { - var wc WC - for _, widthConf := range wcc { - wc = widthConf - } if format == "" { format = "%.0f" } d := &averageSpeed{ - WC: wc.Init(), + WC: initWC(wcc...), startTime: startTime, 
producer: chooseSpeedProducer(unit, format), } @@ -144,9 +144,9 @@ type averageSpeed struct { msg string } -func (d *averageSpeed) Decor(st *Statistics) string { - if !st.Completed { - speed := float64(st.Current) / float64(time.Since(d.startTime)) +func (d *averageSpeed) Decor(s *Statistics) string { + if !s.Completed { + speed := float64(s.Current) / float64(time.Since(d.startTime)) d.msg = d.producer(speed * 1e9) } @@ -161,11 +161,11 @@ func chooseSpeedProducer(unit int, format string) func(float64) string { switch unit { case UnitKiB: return func(speed float64) string { - return fmt.Sprintf(format, &SpeedFormatter{SizeB1024(math.Round(speed))}) + return fmt.Sprintf(format, FmtAsSpeed(SizeB1024(math.Round(speed)))) } case UnitKB: return func(speed float64) string { - return fmt.Sprintf(format, &SpeedFormatter{SizeB1000(math.Round(speed))}) + return fmt.Sprintf(format, FmtAsSpeed(SizeB1000(math.Round(speed)))) } default: return func(speed float64) string { diff --git a/vendor/github.com/vbauerster/mpb/v4/decor/spinner.go b/vendor/github.com/vbauerster/mpb/v4/decor/spinner.go index 24f553142..abfb2f76c 100644 --- a/vendor/github.com/vbauerster/mpb/v4/decor/spinner.go +++ b/vendor/github.com/vbauerster/mpb/v4/decor/spinner.go @@ -8,28 +8,14 @@ var defaultSpinnerStyle = []string{"â ‹", "â ™", "â ¹", "â ¸", "â ¼", "â ´", "â // // `wcc` optional WC config func Spinner(frames []string, wcc ...WC) Decorator { - var wc WC - for _, widthConf := range wcc { - wc = widthConf - } if len(frames) == 0 { frames = defaultSpinnerStyle } - d := &spinnerDecorator{ - WC: wc.Init(), - frames: frames, + var count uint + f := func(s *Statistics) string { + frame := frames[count%uint(len(frames))] + count++ + return frame } - return d -} - -type spinnerDecorator struct { - WC - frames []string - count uint -} - -func (d *spinnerDecorator) Decor(st *Statistics) string { - frame := d.frames[d.count%uint(len(d.frames))] - d.count++ - return d.FormatMsg(frame) + return Any(f, wcc...) 
} diff --git a/vendor/github.com/vbauerster/mpb/v4/go.mod b/vendor/github.com/vbauerster/mpb/v4/go.mod index 9e7287d5d..43b42d496 100644 --- a/vendor/github.com/vbauerster/mpb/v4/go.mod +++ b/vendor/github.com/vbauerster/mpb/v4/go.mod @@ -3,8 +3,8 @@ module github.com/vbauerster/mpb/v4 require ( github.com/VividCortex/ewma v1.1.1 github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d - golang.org/x/crypto v0.0.0-20191112222119-e1110fd1c708 - golang.org/x/sys v0.0.0-20191113165036-4c7a9d0fe056 // indirect + golang.org/x/crypto v0.0.0-20200214034016-1d94cc7ab1c6 + golang.org/x/sys v0.0.0-20200217220822-9197077df867 // indirect ) go 1.13 diff --git a/vendor/github.com/vbauerster/mpb/v4/go.sum b/vendor/github.com/vbauerster/mpb/v4/go.sum index 5a1316274..3d6d33a5c 100644 --- a/vendor/github.com/vbauerster/mpb/v4/go.sum +++ b/vendor/github.com/vbauerster/mpb/v4/go.sum @@ -3,11 +3,11 @@ github.com/VividCortex/ewma v1.1.1/go.mod h1:2Tkkvm3sRDVXaiyucHiACn4cqf7DpdyLvmx github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d h1:licZJFw2RwpHMqeKTCYkitsPqHNxTmd4SNR5r94FGM8= github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d/go.mod h1:asat636LX7Bqt5lYEZ27JNDcqxfjdBQuJ/MM4CN/Lzo= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20191112222119-e1110fd1c708 h1:pXVtWnwHkrWD9ru3sDxY/qFK/bfc0egRovX91EjWjf4= -golang.org/x/crypto v0.0.0-20191112222119-e1110fd1c708/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200214034016-1d94cc7ab1c6 h1:Sy5bstxEqwwbYs6n0/pBuxKENqOeZUgD45Gp3Q3pqLg= +golang.org/x/crypto v0.0.0-20200214034016-1d94cc7ab1c6/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys 
v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191113165036-4c7a9d0fe056 h1:dHtDnRWQtSx0Hjq9kvKFpBh9uPPKfQN70NZZmvssGwk= -golang.org/x/sys v0.0.0-20191113165036-4c7a9d0fe056/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200217220822-9197077df867 h1:JoRuNIf+rpHl+VhScRQQvzbHed86tKkqwPMV34T8myw= +golang.org/x/sys v0.0.0-20200217220822-9197077df867/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= diff --git a/vendor/github.com/vbauerster/mpb/v4/options.go b/vendor/github.com/vbauerster/mpb/v4/options.go index 6b34fb340..048870284 100644 --- a/vendor/github.com/vbauerster/mpb/v4/options.go +++ b/vendor/github.com/vbauerster/mpb/v4/options.go @@ -96,8 +96,8 @@ func PopCompletedMode() ContainerOption { } } -// ContainerOptOnCond returns option when condition evaluates to true. -func ContainerOptOnCond(option ContainerOption, condition func() bool) ContainerOption { +// ContainerOptOn returns option when condition evaluates to true. +func ContainerOptOn(option ContainerOption, condition func() bool) ContainerOption { if condition() { return option } diff --git a/vendor/github.com/vbauerster/mpb/v4/progress.go b/vendor/github.com/vbauerster/mpb/v4/progress.go index 1150d50bd..c9b72b0e7 100644 --- a/vendor/github.com/vbauerster/mpb/v4/progress.go +++ b/vendor/github.com/vbauerster/mpb/v4/progress.go @@ -4,6 +4,7 @@ import ( "bytes" "container/heap" "context" + "fmt" "io" "io/ioutil" "log" @@ -97,18 +98,19 @@ func NewWithContext(ctx context.Context, options ...ContainerOption) *Progress { return p } -// AddBar creates a new progress bar and adds to the container. +// AddBar creates a new progress bar and adds it to the rendering queue. func (p *Progress) AddBar(total int64, options ...BarOption) *Bar { return p.Add(total, NewBarFiller(DefaultBarStyle, false), options...) 
} -// AddSpinner creates a new spinner bar and adds to the container. +// AddSpinner creates a new spinner bar and adds it to the rendering queue. func (p *Progress) AddSpinner(total int64, alignment SpinnerAlignment, options ...BarOption) *Bar { return p.Add(total, NewSpinnerFiller(DefaultSpinnerStyle, alignment), options...) } // Add creates a bar which renders itself by provided filler. // Set total to 0, if you plan to update it later. +// Panics if *Progress instance is done, i.e. called after *Progress.Wait(). func (p *Progress) Add(total int64, filler Filler, options ...BarOption) *Bar { if filler == nil { filler = NewBarFiller(DefaultBarStyle, false) @@ -134,7 +136,7 @@ func (p *Progress) Add(total int64, filler Filler, options ...BarOption) *Bar { return bar case <-p.done: p.bwg.Done() - return nil + panic(fmt.Sprintf("%T instance can't be reused after it's done!", p)) } } @@ -387,7 +389,7 @@ func syncWidth(matrix map[int][]chan int) { } func extractBaseFiller(f Filler) Filler { - if f, ok := f.(Wrapper); ok { + if f, ok := f.(WrapFiller); ok { return extractBaseFiller(f.Base()) } return f diff --git a/vendor/github.com/vbauerster/mpb/v4/spinner_filler.go b/vendor/github.com/vbauerster/mpb/v4/spinner_filler.go index 9f383fb33..f855be44e 100644 --- a/vendor/github.com/vbauerster/mpb/v4/spinner_filler.go +++ b/vendor/github.com/vbauerster/mpb/v4/spinner_filler.go @@ -18,7 +18,7 @@ const ( SpinnerOnRight ) -// DefaultSpinnerStyle is applied when bar constructed with *Progress.AddSpinner method. +// DefaultSpinnerStyle is a slice of strings, which makes a spinner. var DefaultSpinnerStyle = []string{"â ‹", "â ™", "â ¹", "â ¸", "â ¼", "â ´", "â ¦", "â §", "â ‡", "â "} type spinnerFiller struct { @@ -27,7 +27,7 @@ type spinnerFiller struct { alignment SpinnerAlignment } -// NewSpinnerFiller constucts mpb.Filler, to be used with *Progress.Add method. +// NewSpinnerFiller constucts mpb.Filler, to be used with *Progress.Add(...) *Bar method. 
func NewSpinnerFiller(style []string, alignment SpinnerAlignment) Filler { if len(style) == 0 { style = DefaultSpinnerStyle diff --git a/vendor/github.com/etcd-io/bbolt/.gitignore b/vendor/go.etcd.io/bbolt/.gitignore index 3bcd8cbaf..3bcd8cbaf 100644 --- a/vendor/github.com/etcd-io/bbolt/.gitignore +++ b/vendor/go.etcd.io/bbolt/.gitignore diff --git a/vendor/github.com/etcd-io/bbolt/.travis.yml b/vendor/go.etcd.io/bbolt/.travis.yml index a60300c55..257dfdfee 100644 --- a/vendor/github.com/etcd-io/bbolt/.travis.yml +++ b/vendor/go.etcd.io/bbolt/.travis.yml @@ -4,7 +4,7 @@ go_import_path: go.etcd.io/bbolt sudo: false go: -- 1.11 +- 1.12 before_install: - go get -v honnef.co/go/tools/... diff --git a/vendor/github.com/etcd-io/bbolt/LICENSE b/vendor/go.etcd.io/bbolt/LICENSE index 004e77fe5..004e77fe5 100644 --- a/vendor/github.com/etcd-io/bbolt/LICENSE +++ b/vendor/go.etcd.io/bbolt/LICENSE diff --git a/vendor/github.com/etcd-io/bbolt/Makefile b/vendor/go.etcd.io/bbolt/Makefile index 2968aaa61..2968aaa61 100644 --- a/vendor/github.com/etcd-io/bbolt/Makefile +++ b/vendor/go.etcd.io/bbolt/Makefile diff --git a/vendor/github.com/etcd-io/bbolt/README.md b/vendor/go.etcd.io/bbolt/README.md index e9989efc5..2dff3761d 100644 --- a/vendor/github.com/etcd-io/bbolt/README.md +++ b/vendor/go.etcd.io/bbolt/README.md @@ -275,7 +275,7 @@ should be writable. ### Using buckets Buckets are collections of key/value pairs within the database. All keys in a -bucket must be unique. You can create a bucket using the `DB.CreateBucket()` +bucket must be unique. You can create a bucket using the `Tx.CreateBucket()` function: ```go @@ -923,6 +923,7 @@ Below is a list of public, open source projects that use Bolt: * [GoWebApp](https://github.com/josephspurrier/gowebapp) - A basic MVC web application in Go using BoltDB. 
* [GoShort](https://github.com/pankajkhairnar/goShort) - GoShort is a URL shortener written in Golang and BoltDB for persistent key/value storage and for routing it's using high performent HTTPRouter. * [gopherpit](https://github.com/gopherpit/gopherpit) - A web service to manage Go remote import paths with custom domains +* [gokv](https://github.com/philippgille/gokv) - Simple key-value store abstraction and implementations for Go (Redis, Consul, etcd, bbolt, BadgerDB, LevelDB, Memcached, DynamoDB, S3, PostgreSQL, MongoDB, CockroachDB and many more) * [Gitchain](https://github.com/gitchain/gitchain) - Decentralized, peer-to-peer Git repositories aka "Git meets Bitcoin". * [InfluxDB](https://influxdata.com) - Scalable datastore for metrics, events, and real-time analytics. * [ipLocator](https://github.com/AndreasBriese/ipLocator) - A fast ip-geo-location-server using bolt with bloom filters. @@ -935,6 +936,7 @@ Below is a list of public, open source projects that use Bolt: * [mbuckets](https://github.com/abhigupta912/mbuckets) - A Bolt wrapper that allows easy operations on multi level (nested) buckets. * [MetricBase](https://github.com/msiebuhr/MetricBase) - Single-binary version of Graphite. * [MuLiFS](https://github.com/dankomiocevic/mulifs) - Music Library Filesystem creates a filesystem to organise your music files. +* [NATS](https://github.com/nats-io/nats-streaming-server) - NATS Streaming uses bbolt for message and metadata storage. * [Operation Go: A Routine Mission](http://gocode.io) - An online programming game for Golang using Bolt for user accounts and a leaderboard. * [photosite/session](https://godoc.org/bitbucket.org/kardianos/photosite/session) - Sessions for a photo viewing site. * [Prometheus Annotation Server](https://github.com/oliver006/prom_annotation_server) - Annotation server for PromDash & Prometheus service monitoring system. 
diff --git a/vendor/github.com/etcd-io/bbolt/bolt_386.go b/vendor/go.etcd.io/bbolt/bolt_386.go index 4d35ee7cf..aee25960f 100644 --- a/vendor/github.com/etcd-io/bbolt/bolt_386.go +++ b/vendor/go.etcd.io/bbolt/bolt_386.go @@ -5,6 +5,3 @@ const maxMapSize = 0x7FFFFFFF // 2GB // maxAllocSize is the size used when creating array pointers. const maxAllocSize = 0xFFFFFFF - -// Are unaligned load/stores broken on this arch? -var brokenUnaligned = false diff --git a/vendor/github.com/etcd-io/bbolt/bolt_amd64.go b/vendor/go.etcd.io/bbolt/bolt_amd64.go index 60a52dad5..5dd8f3f2a 100644 --- a/vendor/github.com/etcd-io/bbolt/bolt_amd64.go +++ b/vendor/go.etcd.io/bbolt/bolt_amd64.go @@ -5,6 +5,3 @@ const maxMapSize = 0xFFFFFFFFFFFF // 256TB // maxAllocSize is the size used when creating array pointers. const maxAllocSize = 0x7FFFFFFF - -// Are unaligned load/stores broken on this arch? -var brokenUnaligned = false diff --git a/vendor/go.etcd.io/bbolt/bolt_arm.go b/vendor/go.etcd.io/bbolt/bolt_arm.go new file mode 100644 index 000000000..aee25960f --- /dev/null +++ b/vendor/go.etcd.io/bbolt/bolt_arm.go @@ -0,0 +1,7 @@ +package bbolt + +// maxMapSize represents the largest mmap size supported by Bolt. +const maxMapSize = 0x7FFFFFFF // 2GB + +// maxAllocSize is the size used when creating array pointers. +const maxAllocSize = 0xFFFFFFF diff --git a/vendor/github.com/etcd-io/bbolt/bolt_arm64.go b/vendor/go.etcd.io/bbolt/bolt_arm64.go index f5aa2a5ee..810dfd55c 100644 --- a/vendor/github.com/etcd-io/bbolt/bolt_arm64.go +++ b/vendor/go.etcd.io/bbolt/bolt_arm64.go @@ -7,6 +7,3 @@ const maxMapSize = 0xFFFFFFFFFFFF // 256TB // maxAllocSize is the size used when creating array pointers. const maxAllocSize = 0x7FFFFFFF - -// Are unaligned load/stores broken on this arch? 
-var brokenUnaligned = false diff --git a/vendor/github.com/etcd-io/bbolt/bolt_linux.go b/vendor/go.etcd.io/bbolt/bolt_linux.go index 7707bcacf..7707bcacf 100644 --- a/vendor/github.com/etcd-io/bbolt/bolt_linux.go +++ b/vendor/go.etcd.io/bbolt/bolt_linux.go diff --git a/vendor/github.com/etcd-io/bbolt/bolt_mips64x.go b/vendor/go.etcd.io/bbolt/bolt_mips64x.go index baeb289fd..dd8ffe123 100644 --- a/vendor/github.com/etcd-io/bbolt/bolt_mips64x.go +++ b/vendor/go.etcd.io/bbolt/bolt_mips64x.go @@ -7,6 +7,3 @@ const maxMapSize = 0x8000000000 // 512GB // maxAllocSize is the size used when creating array pointers. const maxAllocSize = 0x7FFFFFFF - -// Are unaligned load/stores broken on this arch? -var brokenUnaligned = false diff --git a/vendor/github.com/etcd-io/bbolt/bolt_mipsx.go b/vendor/go.etcd.io/bbolt/bolt_mipsx.go index 2d9b1a91f..a669703a4 100644 --- a/vendor/github.com/etcd-io/bbolt/bolt_mipsx.go +++ b/vendor/go.etcd.io/bbolt/bolt_mipsx.go @@ -7,6 +7,3 @@ const maxMapSize = 0x40000000 // 1GB // maxAllocSize is the size used when creating array pointers. const maxAllocSize = 0xFFFFFFF - -// Are unaligned load/stores broken on this arch? -var brokenUnaligned = false diff --git a/vendor/github.com/etcd-io/bbolt/bolt_openbsd.go b/vendor/go.etcd.io/bbolt/bolt_openbsd.go index d7f50358e..d7f50358e 100644 --- a/vendor/github.com/etcd-io/bbolt/bolt_openbsd.go +++ b/vendor/go.etcd.io/bbolt/bolt_openbsd.go diff --git a/vendor/github.com/etcd-io/bbolt/bolt_ppc.go b/vendor/go.etcd.io/bbolt/bolt_ppc.go index 69804714a..84e545ef3 100644 --- a/vendor/github.com/etcd-io/bbolt/bolt_ppc.go +++ b/vendor/go.etcd.io/bbolt/bolt_ppc.go @@ -7,6 +7,3 @@ const maxMapSize = 0x7FFFFFFF // 2GB // maxAllocSize is the size used when creating array pointers. const maxAllocSize = 0xFFFFFFF - -// Are unaligned load/stores broken on this arch? 
-var brokenUnaligned = false diff --git a/vendor/github.com/etcd-io/bbolt/bolt_ppc64.go b/vendor/go.etcd.io/bbolt/bolt_ppc64.go index 356590857..a76120908 100644 --- a/vendor/github.com/etcd-io/bbolt/bolt_ppc64.go +++ b/vendor/go.etcd.io/bbolt/bolt_ppc64.go @@ -7,6 +7,3 @@ const maxMapSize = 0xFFFFFFFFFFFF // 256TB // maxAllocSize is the size used when creating array pointers. const maxAllocSize = 0x7FFFFFFF - -// Are unaligned load/stores broken on this arch? -var brokenUnaligned = false diff --git a/vendor/github.com/etcd-io/bbolt/bolt_ppc64le.go b/vendor/go.etcd.io/bbolt/bolt_ppc64le.go index 422c7c69d..c830f2fc7 100644 --- a/vendor/github.com/etcd-io/bbolt/bolt_ppc64le.go +++ b/vendor/go.etcd.io/bbolt/bolt_ppc64le.go @@ -7,6 +7,3 @@ const maxMapSize = 0xFFFFFFFFFFFF // 256TB // maxAllocSize is the size used when creating array pointers. const maxAllocSize = 0x7FFFFFFF - -// Are unaligned load/stores broken on this arch? -var brokenUnaligned = false diff --git a/vendor/github.com/etcd-io/bbolt/bolt_riscv64.go b/vendor/go.etcd.io/bbolt/bolt_riscv64.go index 07b4b47cd..c967613b0 100644 --- a/vendor/github.com/etcd-io/bbolt/bolt_riscv64.go +++ b/vendor/go.etcd.io/bbolt/bolt_riscv64.go @@ -7,6 +7,3 @@ const maxMapSize = 0xFFFFFFFFFFFF // 256TB // maxAllocSize is the size used when creating array pointers. const maxAllocSize = 0x7FFFFFFF - -// Are unaligned load/stores broken on this arch? -var brokenUnaligned = true diff --git a/vendor/github.com/etcd-io/bbolt/bolt_s390x.go b/vendor/go.etcd.io/bbolt/bolt_s390x.go index 6d3fcb825..ff2a56097 100644 --- a/vendor/github.com/etcd-io/bbolt/bolt_s390x.go +++ b/vendor/go.etcd.io/bbolt/bolt_s390x.go @@ -7,6 +7,3 @@ const maxMapSize = 0xFFFFFFFFFFFF // 256TB // maxAllocSize is the size used when creating array pointers. const maxAllocSize = 0x7FFFFFFF - -// Are unaligned load/stores broken on this arch? 
-var brokenUnaligned = false diff --git a/vendor/github.com/etcd-io/bbolt/bolt_unix.go b/vendor/go.etcd.io/bbolt/bolt_unix.go index 5f2bb5145..2938fed58 100644 --- a/vendor/github.com/etcd-io/bbolt/bolt_unix.go +++ b/vendor/go.etcd.io/bbolt/bolt_unix.go @@ -1,4 +1,4 @@ -// +build !windows,!plan9,!solaris +// +build !windows,!plan9,!solaris,!aix package bbolt diff --git a/vendor/go.etcd.io/bbolt/bolt_unix_aix.go b/vendor/go.etcd.io/bbolt/bolt_unix_aix.go new file mode 100644 index 000000000..a64c16f51 --- /dev/null +++ b/vendor/go.etcd.io/bbolt/bolt_unix_aix.go @@ -0,0 +1,90 @@ +// +build aix + +package bbolt + +import ( + "fmt" + "syscall" + "time" + "unsafe" + + "golang.org/x/sys/unix" +) + +// flock acquires an advisory lock on a file descriptor. +func flock(db *DB, exclusive bool, timeout time.Duration) error { + var t time.Time + if timeout != 0 { + t = time.Now() + } + fd := db.file.Fd() + var lockType int16 + if exclusive { + lockType = syscall.F_WRLCK + } else { + lockType = syscall.F_RDLCK + } + for { + // Attempt to obtain an exclusive lock. + lock := syscall.Flock_t{Type: lockType} + err := syscall.FcntlFlock(fd, syscall.F_SETLK, &lock) + if err == nil { + return nil + } else if err != syscall.EAGAIN { + return err + } + + // If we timed out then return an error. + if timeout != 0 && time.Since(t) > timeout-flockRetryTimeout { + return ErrTimeout + } + + // Wait for a bit and try again. + time.Sleep(flockRetryTimeout) + } +} + +// funlock releases an advisory lock on a file descriptor. +func funlock(db *DB) error { + var lock syscall.Flock_t + lock.Start = 0 + lock.Len = 0 + lock.Type = syscall.F_UNLCK + lock.Whence = 0 + return syscall.FcntlFlock(uintptr(db.file.Fd()), syscall.F_SETLK, &lock) +} + +// mmap memory maps a DB's data file. +func mmap(db *DB, sz int) error { + // Map the data file to memory. 
+ b, err := unix.Mmap(int(db.file.Fd()), 0, sz, syscall.PROT_READ, syscall.MAP_SHARED|db.MmapFlags) + if err != nil { + return err + } + + // Advise the kernel that the mmap is accessed randomly. + if err := unix.Madvise(b, syscall.MADV_RANDOM); err != nil { + return fmt.Errorf("madvise: %s", err) + } + + // Save the original byte slice and convert to a byte array pointer. + db.dataref = b + db.data = (*[maxMapSize]byte)(unsafe.Pointer(&b[0])) + db.datasz = sz + return nil +} + +// munmap unmaps a DB's data file from memory. +func munmap(db *DB) error { + // Ignore the unmap if we have no mapped data. + if db.dataref == nil { + return nil + } + + // Unmap using the original byte slice. + err := unix.Munmap(db.dataref) + db.dataref = nil + db.data = nil + db.datasz = 0 + return err +} diff --git a/vendor/github.com/etcd-io/bbolt/bolt_unix_solaris.go b/vendor/go.etcd.io/bbolt/bolt_unix_solaris.go index babad6578..babad6578 100644 --- a/vendor/github.com/etcd-io/bbolt/bolt_unix_solaris.go +++ b/vendor/go.etcd.io/bbolt/bolt_unix_solaris.go diff --git a/vendor/github.com/etcd-io/bbolt/bolt_windows.go b/vendor/go.etcd.io/bbolt/bolt_windows.go index fca178bd2..fca178bd2 100644 --- a/vendor/github.com/etcd-io/bbolt/bolt_windows.go +++ b/vendor/go.etcd.io/bbolt/bolt_windows.go diff --git a/vendor/github.com/etcd-io/bbolt/boltsync_unix.go b/vendor/go.etcd.io/bbolt/boltsync_unix.go index 9587afefe..9587afefe 100644 --- a/vendor/github.com/etcd-io/bbolt/boltsync_unix.go +++ b/vendor/go.etcd.io/bbolt/boltsync_unix.go diff --git a/vendor/github.com/etcd-io/bbolt/bucket.go b/vendor/go.etcd.io/bbolt/bucket.go index 84bfd4d6a..d8750b148 100644 --- a/vendor/github.com/etcd-io/bbolt/bucket.go +++ b/vendor/go.etcd.io/bbolt/bucket.go @@ -123,10 +123,12 @@ func (b *Bucket) Bucket(name []byte) *Bucket { func (b *Bucket) openBucket(value []byte) *Bucket { var child = newBucket(b.tx) - // If unaligned load/stores are broken on this arch and value is - // unaligned simply clone to an 
aligned byte array. - unaligned := brokenUnaligned && uintptr(unsafe.Pointer(&value[0]))&3 != 0 - + // Unaligned access requires a copy to be made. + const unalignedMask = unsafe.Alignof(struct { + bucket + page + }{}) - 1 + unaligned := uintptr(unsafe.Pointer(&value[0]))&unalignedMask != 0 if unaligned { value = cloneBytes(value) } @@ -206,7 +208,7 @@ func (b *Bucket) CreateBucketIfNotExists(key []byte) (*Bucket, error) { } // DeleteBucket deletes a bucket at the given key. -// Returns an error if the bucket does not exists, or if the key represents a non-bucket value. +// Returns an error if the bucket does not exist, or if the key represents a non-bucket value. func (b *Bucket) DeleteBucket(key []byte) error { if b.tx.db == nil { return ErrTxClosed @@ -228,7 +230,7 @@ func (b *Bucket) DeleteBucket(key []byte) error { // Recursively delete all child buckets. child := b.Bucket(key) err := child.ForEach(func(k, v []byte) error { - if v == nil { + if _, _, childFlags := child.Cursor().seek(k); (childFlags & bucketLeafFlag) != 0 { if err := child.DeleteBucket(k); err != nil { return fmt.Errorf("delete bucket: %s", err) } @@ -409,7 +411,7 @@ func (b *Bucket) Stats() BucketStats { if p.count != 0 { // If page has any elements, add all element headers. - used += leafPageElementSize * int(p.count-1) + used += leafPageElementSize * uintptr(p.count-1) // Add all element key, value sizes. // The computation takes advantage of the fact that the position @@ -417,16 +419,16 @@ func (b *Bucket) Stats() BucketStats { // of all previous elements' keys and values. // It also includes the last element's header. 
lastElement := p.leafPageElement(p.count - 1) - used += int(lastElement.pos + lastElement.ksize + lastElement.vsize) + used += uintptr(lastElement.pos + lastElement.ksize + lastElement.vsize) } if b.root == 0 { // For inlined bucket just update the inline stats - s.InlineBucketInuse += used + s.InlineBucketInuse += int(used) } else { // For non-inlined bucket update all the leaf stats s.LeafPageN++ - s.LeafInuse += used + s.LeafInuse += int(used) s.LeafOverflowN += int(p.overflow) // Collect stats from sub-buckets. @@ -447,13 +449,13 @@ func (b *Bucket) Stats() BucketStats { // used totals the used bytes for the page // Add header and all element headers. - used := pageHeaderSize + (branchPageElementSize * int(p.count-1)) + used := pageHeaderSize + (branchPageElementSize * uintptr(p.count-1)) // Add size of all keys and values. // Again, use the fact that last element's position equals to // the total of key, value sizes of all previous elements. - used += int(lastElement.pos + lastElement.ksize) - s.BranchInuse += used + used += uintptr(lastElement.pos + lastElement.ksize) + s.BranchInuse += int(used) s.BranchOverflowN += int(p.overflow) } @@ -593,7 +595,7 @@ func (b *Bucket) inlineable() bool { // our threshold for inline bucket size. var size = pageHeaderSize for _, inode := range n.inodes { - size += leafPageElementSize + len(inode.key) + len(inode.value) + size += leafPageElementSize + uintptr(len(inode.key)) + uintptr(len(inode.value)) if inode.flags&bucketLeafFlag != 0 { return false @@ -606,8 +608,8 @@ func (b *Bucket) inlineable() bool { } // Returns the maximum total size of a bucket to make it a candidate for inlining. -func (b *Bucket) maxInlineBucketSize() int { - return b.tx.db.pageSize / 4 +func (b *Bucket) maxInlineBucketSize() uintptr { + return uintptr(b.tx.db.pageSize / 4) } // write allocates and writes a bucket to a byte slice. 
diff --git a/vendor/github.com/etcd-io/bbolt/cursor.go b/vendor/go.etcd.io/bbolt/cursor.go index 3000aced6..98aeb449a 100644 --- a/vendor/github.com/etcd-io/bbolt/cursor.go +++ b/vendor/go.etcd.io/bbolt/cursor.go @@ -366,7 +366,7 @@ func (c *Cursor) node() *node { } for _, ref := range c.stack[:len(c.stack)-1] { _assert(!n.isLeaf, "expected branch node") - n = n.childAt(int(ref.index)) + n = n.childAt(ref.index) } _assert(n.isLeaf, "expected leaf node") return n diff --git a/vendor/github.com/etcd-io/bbolt/db.go b/vendor/go.etcd.io/bbolt/db.go index 870c8b1cc..80b0095cc 100644 --- a/vendor/github.com/etcd-io/bbolt/db.go +++ b/vendor/go.etcd.io/bbolt/db.go @@ -206,12 +206,12 @@ func Open(path string, mode os.FileMode, options *Options) (*DB, error) { } // Open data file and separate sync handler for metadata writes. - db.path = path var err error - if db.file, err = db.openFile(db.path, flag|os.O_CREATE, mode); err != nil { + if db.file, err = db.openFile(path, flag|os.O_CREATE, mode); err != nil { _ = db.close() return nil, err } + db.path = db.file.Name() // Lock file so that other processes using Bolt in read-write mode cannot // use the database at the same time. 
This would cause corruption since diff --git a/vendor/github.com/etcd-io/bbolt/doc.go b/vendor/go.etcd.io/bbolt/doc.go index 95f25f01c..95f25f01c 100644 --- a/vendor/github.com/etcd-io/bbolt/doc.go +++ b/vendor/go.etcd.io/bbolt/doc.go diff --git a/vendor/github.com/etcd-io/bbolt/errors.go b/vendor/go.etcd.io/bbolt/errors.go index 48758ca57..48758ca57 100644 --- a/vendor/github.com/etcd-io/bbolt/errors.go +++ b/vendor/go.etcd.io/bbolt/errors.go diff --git a/vendor/github.com/etcd-io/bbolt/freelist.go b/vendor/go.etcd.io/bbolt/freelist.go index 587b8cc02..d441b6925 100644 --- a/vendor/github.com/etcd-io/bbolt/freelist.go +++ b/vendor/go.etcd.io/bbolt/freelist.go @@ -2,6 +2,7 @@ package bbolt import ( "fmt" + "reflect" "sort" "unsafe" ) @@ -71,7 +72,7 @@ func (f *freelist) size() int { // The first element will be used to store the count. See freelist.write. n++ } - return pageHeaderSize + (int(unsafe.Sizeof(pgid(0))) * n) + return int(pageHeaderSize) + (int(unsafe.Sizeof(pgid(0))) * n) } // count returns count of pages on the freelist @@ -93,8 +94,24 @@ func (f *freelist) pending_count() int { return count } -// copyall copies into dst a list of all free ids and all pending ids in one sorted list. +// copyallunsafe copies a list of all free ids and all pending ids in one sorted list. // f.count returns the minimum length required for dst. +func (f *freelist) copyallunsafe(dstptr unsafe.Pointer) { // dstptr is []pgid data pointer + m := make(pgids, 0, f.pending_count()) + for _, txp := range f.pending { + m = append(m, txp.ids...) 
+ } + sort.Sort(m) + fpgids := f.getFreePageIDs() + sz := len(fpgids) + len(m) + dst := *(*[]pgid)(unsafe.Pointer(&reflect.SliceHeader{ + Data: uintptr(dstptr), + Len: sz, + Cap: sz, + })) + mergepgids(dst, fpgids, m) +} + func (f *freelist) copyall(dst []pgid) { m := make(pgids, 0, f.pending_count()) for _, txp := range f.pending { @@ -267,17 +284,21 @@ func (f *freelist) read(p *page) { } // If the page.count is at the max uint16 value (64k) then it's considered // an overflow and the size of the freelist is stored as the first element. - idx, count := 0, int(p.count) + var idx, count uintptr = 0, uintptr(p.count) if count == 0xFFFF { idx = 1 - count = int(((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[0]) + count = uintptr(*(*pgid)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) + unsafe.Sizeof(*p)))) } // Copy the list of page ids from the freelist. if count == 0 { f.ids = nil } else { - ids := ((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[idx : idx+count] + ids := *(*[]pgid)(unsafe.Pointer(&reflect.SliceHeader{ + Data: uintptr(unsafe.Pointer(p)) + unsafe.Sizeof(*p) + idx*unsafe.Sizeof(pgid(0)), + Len: int(count), + Cap: int(count), + })) // copy the ids, so we don't modify on the freelist page directly idsCopy := make([]pgid, count) @@ -315,11 +336,11 @@ func (f *freelist) write(p *page) error { p.count = uint16(lenids) } else if lenids < 0xFFFF { p.count = uint16(lenids) - f.copyall(((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[:]) + f.copyallunsafe(unsafe.Pointer(uintptr(unsafe.Pointer(p)) + unsafe.Sizeof(*p))) } else { p.count = 0xFFFF - ((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[0] = pgid(lenids) - f.copyall(((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[1:]) + *(*pgid)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) + unsafe.Sizeof(*p))) = pgid(lenids) + f.copyallunsafe(unsafe.Pointer(uintptr(unsafe.Pointer(p)) + unsafe.Sizeof(*p) + unsafe.Sizeof(pgid(0)))) } return nil diff --git a/vendor/github.com/etcd-io/bbolt/freelist_hmap.go 
b/vendor/go.etcd.io/bbolt/freelist_hmap.go index 6a03a6c3c..02ef2be04 100644 --- a/vendor/github.com/etcd-io/bbolt/freelist_hmap.go +++ b/vendor/go.etcd.io/bbolt/freelist_hmap.go @@ -27,7 +27,7 @@ func (f *freelist) hashmapAllocate(txid txid, n int) pgid { f.allocs[pid] = txid for i := pgid(0); i < pgid(n); i++ { - delete(f.cache, pid+pgid(i)) + delete(f.cache, pid+i) } return pid } diff --git a/vendor/go.etcd.io/bbolt/go.mod b/vendor/go.etcd.io/bbolt/go.mod new file mode 100644 index 000000000..c2366daef --- /dev/null +++ b/vendor/go.etcd.io/bbolt/go.mod @@ -0,0 +1,5 @@ +module go.etcd.io/bbolt + +go 1.12 + +require golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5 diff --git a/vendor/go.etcd.io/bbolt/go.sum b/vendor/go.etcd.io/bbolt/go.sum new file mode 100644 index 000000000..4ad15a488 --- /dev/null +++ b/vendor/go.etcd.io/bbolt/go.sum @@ -0,0 +1,2 @@ +golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5 h1:LfCXLvNmTYH9kEmVgqbnsWfruoXZIrh4YBgqVHtDvw0= +golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= diff --git a/vendor/github.com/etcd-io/bbolt/node.go b/vendor/go.etcd.io/bbolt/node.go index 6c3fa553e..1690eef3f 100644 --- a/vendor/github.com/etcd-io/bbolt/node.go +++ b/vendor/go.etcd.io/bbolt/node.go @@ -3,6 +3,7 @@ package bbolt import ( "bytes" "fmt" + "reflect" "sort" "unsafe" ) @@ -41,19 +42,19 @@ func (n *node) size() int { sz, elsz := pageHeaderSize, n.pageElementSize() for i := 0; i < len(n.inodes); i++ { item := &n.inodes[i] - sz += elsz + len(item.key) + len(item.value) + sz += elsz + uintptr(len(item.key)) + uintptr(len(item.value)) } - return sz + return int(sz) } // sizeLessThan returns true if the node is less than a given size. // This is an optimization to avoid calculating a large node when we only need // to know if it fits inside a certain page size. 
-func (n *node) sizeLessThan(v int) bool { +func (n *node) sizeLessThan(v uintptr) bool { sz, elsz := pageHeaderSize, n.pageElementSize() for i := 0; i < len(n.inodes); i++ { item := &n.inodes[i] - sz += elsz + len(item.key) + len(item.value) + sz += elsz + uintptr(len(item.key)) + uintptr(len(item.value)) if sz >= v { return false } @@ -62,7 +63,7 @@ func (n *node) sizeLessThan(v int) bool { } // pageElementSize returns the size of each page element based on the type of node. -func (n *node) pageElementSize() int { +func (n *node) pageElementSize() uintptr { if n.isLeaf { return leafPageElementSize } @@ -207,39 +208,39 @@ func (n *node) write(p *page) { } // Loop over each item and write it to the page. - b := (*[maxAllocSize]byte)(unsafe.Pointer(&p.ptr))[n.pageElementSize()*len(n.inodes):] + bp := uintptr(unsafe.Pointer(p)) + unsafe.Sizeof(*p) + n.pageElementSize()*uintptr(len(n.inodes)) for i, item := range n.inodes { _assert(len(item.key) > 0, "write: zero-length inode key") // Write the page element. if n.isLeaf { elem := p.leafPageElement(uint16(i)) - elem.pos = uint32(uintptr(unsafe.Pointer(&b[0])) - uintptr(unsafe.Pointer(elem))) + elem.pos = uint32(bp - uintptr(unsafe.Pointer(elem))) elem.flags = item.flags elem.ksize = uint32(len(item.key)) elem.vsize = uint32(len(item.value)) } else { elem := p.branchPageElement(uint16(i)) - elem.pos = uint32(uintptr(unsafe.Pointer(&b[0])) - uintptr(unsafe.Pointer(elem))) + elem.pos = uint32(bp - uintptr(unsafe.Pointer(elem))) elem.ksize = uint32(len(item.key)) elem.pgid = item.pgid _assert(elem.pgid != p.id, "write: circular dependency occurred") } - // If the length of key+value is larger than the max allocation size - // then we need to reallocate the byte array pointer. - // - // See: https://github.com/boltdb/bolt/pull/335 + // Create a slice to write into of needed size and advance + // byte pointer for next iteration. 
klen, vlen := len(item.key), len(item.value) - if len(b) < klen+vlen { - b = (*[maxAllocSize]byte)(unsafe.Pointer(&b[0]))[:] - } + sz := klen + vlen + b := *(*[]byte)(unsafe.Pointer(&reflect.SliceHeader{ + Data: bp, + Len: sz, + Cap: sz, + })) + bp += uintptr(sz) // Write data for the element to the end of the page. - copy(b[0:], item.key) - b = b[klen:] - copy(b[0:], item.value) - b = b[vlen:] + l := copy(b, item.key) + copy(b[l:], item.value) } // DEBUG ONLY: n.dump() @@ -247,7 +248,7 @@ func (n *node) write(p *page) { // split breaks up a node into multiple smaller nodes, if appropriate. // This should only be called from the spill() function. -func (n *node) split(pageSize int) []*node { +func (n *node) split(pageSize uintptr) []*node { var nodes []*node node := n @@ -270,7 +271,7 @@ func (n *node) split(pageSize int) []*node { // splitTwo breaks up a node into two smaller nodes, if appropriate. // This should only be called from the split() function. -func (n *node) splitTwo(pageSize int) (*node, *node) { +func (n *node) splitTwo(pageSize uintptr) (*node, *node) { // Ignore the split if the page doesn't have at least enough nodes for // two pages or if the nodes can fit in a single page. if len(n.inodes) <= (minKeysPerPage*2) || n.sizeLessThan(pageSize) { @@ -312,18 +313,18 @@ func (n *node) splitTwo(pageSize int) (*node, *node) { // splitIndex finds the position where a page will fill a given threshold. // It returns the index as well as the size of the first page. // This is only be called from split(). -func (n *node) splitIndex(threshold int) (index, sz int) { +func (n *node) splitIndex(threshold int) (index, sz uintptr) { sz = pageHeaderSize // Loop until we only have the minimum number of keys required for the second page. 
for i := 0; i < len(n.inodes)-minKeysPerPage; i++ { - index = i + index = uintptr(i) inode := n.inodes[i] - elsize := n.pageElementSize() + len(inode.key) + len(inode.value) + elsize := n.pageElementSize() + uintptr(len(inode.key)) + uintptr(len(inode.value)) // If we have at least the minimum number of keys and adding another // node would put us over the threshold then exit and return. - if i >= minKeysPerPage && sz+elsize > threshold { + if index >= minKeysPerPage && sz+elsize > uintptr(threshold) { break } @@ -356,7 +357,7 @@ func (n *node) spill() error { n.children = nil // Split nodes into appropriate sizes. The first node will always be n. - var nodes = n.split(tx.db.pageSize) + var nodes = n.split(uintptr(tx.db.pageSize)) for _, node := range nodes { // Add node's page to the freelist if it's not new. if node.pgid > 0 { @@ -587,9 +588,11 @@ func (n *node) dump() { type nodes []*node -func (s nodes) Len() int { return len(s) } -func (s nodes) Swap(i, j int) { s[i], s[j] = s[j], s[i] } -func (s nodes) Less(i, j int) bool { return bytes.Compare(s[i].inodes[0].key, s[j].inodes[0].key) == -1 } +func (s nodes) Len() int { return len(s) } +func (s nodes) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s nodes) Less(i, j int) bool { + return bytes.Compare(s[i].inodes[0].key, s[j].inodes[0].key) == -1 +} // inode represents an internal node inside of a node. 
// It can be used to point to elements in a page or point diff --git a/vendor/github.com/etcd-io/bbolt/page.go b/vendor/go.etcd.io/bbolt/page.go index bca9615f0..b5c169978 100644 --- a/vendor/github.com/etcd-io/bbolt/page.go +++ b/vendor/go.etcd.io/bbolt/page.go @@ -3,16 +3,17 @@ package bbolt import ( "fmt" "os" + "reflect" "sort" "unsafe" ) -const pageHeaderSize = int(unsafe.Offsetof(((*page)(nil)).ptr)) +const pageHeaderSize = unsafe.Sizeof(page{}) const minKeysPerPage = 2 -const branchPageElementSize = int(unsafe.Sizeof(branchPageElement{})) -const leafPageElementSize = int(unsafe.Sizeof(leafPageElement{})) +const branchPageElementSize = unsafe.Sizeof(branchPageElement{}) +const leafPageElementSize = unsafe.Sizeof(leafPageElement{}) const ( branchPageFlag = 0x01 @@ -32,7 +33,6 @@ type page struct { flags uint16 count uint16 overflow uint32 - ptr uintptr } // typ returns a human readable page type string used for debugging. @@ -51,13 +51,13 @@ func (p *page) typ() string { // meta returns a pointer to the metadata section of the page. func (p *page) meta() *meta { - return (*meta)(unsafe.Pointer(&p.ptr)) + return (*meta)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) + unsafe.Sizeof(*p))) } // leafPageElement retrieves the leaf node by index func (p *page) leafPageElement(index uint16) *leafPageElement { - n := &((*[0x7FFFFFF]leafPageElement)(unsafe.Pointer(&p.ptr)))[index] - return n + off := uintptr(index) * unsafe.Sizeof(leafPageElement{}) + return (*leafPageElement)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) + unsafe.Sizeof(*p) + off)) } // leafPageElements retrieves a list of leaf nodes. 
@@ -65,12 +65,17 @@ func (p *page) leafPageElements() []leafPageElement { if p.count == 0 { return nil } - return ((*[0x7FFFFFF]leafPageElement)(unsafe.Pointer(&p.ptr)))[:] + return *(*[]leafPageElement)(unsafe.Pointer(&reflect.SliceHeader{ + Data: uintptr(unsafe.Pointer(p)) + unsafe.Sizeof(*p), + Len: int(p.count), + Cap: int(p.count), + })) } // branchPageElement retrieves the branch node by index func (p *page) branchPageElement(index uint16) *branchPageElement { - return &((*[0x7FFFFFF]branchPageElement)(unsafe.Pointer(&p.ptr)))[index] + off := uintptr(index) * unsafe.Sizeof(branchPageElement{}) + return (*branchPageElement)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) + unsafe.Sizeof(*p) + off)) } // branchPageElements retrieves a list of branch nodes. @@ -78,12 +83,20 @@ func (p *page) branchPageElements() []branchPageElement { if p.count == 0 { return nil } - return ((*[0x7FFFFFF]branchPageElement)(unsafe.Pointer(&p.ptr)))[:] + return *(*[]branchPageElement)(unsafe.Pointer(&reflect.SliceHeader{ + Data: uintptr(unsafe.Pointer(p)) + unsafe.Sizeof(*p), + Len: int(p.count), + Cap: int(p.count), + })) } // dump writes n bytes of the page to STDERR as hex output. func (p *page) hexdump(n int) { - buf := (*[maxAllocSize]byte)(unsafe.Pointer(p))[:n] + buf := *(*[]byte)(unsafe.Pointer(&reflect.SliceHeader{ + Data: uintptr(unsafe.Pointer(p)), + Len: n, + Cap: n, + })) fmt.Fprintf(os.Stderr, "%x\n", buf) } @@ -102,8 +115,11 @@ type branchPageElement struct { // key returns a byte slice of the node key. func (n *branchPageElement) key() []byte { - buf := (*[maxAllocSize]byte)(unsafe.Pointer(n)) - return (*[maxAllocSize]byte)(unsafe.Pointer(&buf[n.pos]))[:n.ksize] + return *(*[]byte)(unsafe.Pointer(&reflect.SliceHeader{ + Data: uintptr(unsafe.Pointer(n)) + uintptr(n.pos), + Len: int(n.ksize), + Cap: int(n.ksize), + })) } // leafPageElement represents a node on a leaf page. @@ -116,14 +132,20 @@ type leafPageElement struct { // key returns a byte slice of the node key. 
func (n *leafPageElement) key() []byte { - buf := (*[maxAllocSize]byte)(unsafe.Pointer(n)) - return (*[maxAllocSize]byte)(unsafe.Pointer(&buf[n.pos]))[:n.ksize:n.ksize] + return *(*[]byte)(unsafe.Pointer(&reflect.SliceHeader{ + Data: uintptr(unsafe.Pointer(n)) + uintptr(n.pos), + Len: int(n.ksize), + Cap: int(n.ksize), + })) } // value returns a byte slice of the node value. func (n *leafPageElement) value() []byte { - buf := (*[maxAllocSize]byte)(unsafe.Pointer(n)) - return (*[maxAllocSize]byte)(unsafe.Pointer(&buf[n.pos+n.ksize]))[:n.vsize:n.vsize] + return *(*[]byte)(unsafe.Pointer(&reflect.SliceHeader{ + Data: uintptr(unsafe.Pointer(n)) + uintptr(n.pos) + uintptr(n.ksize), + Len: int(n.vsize), + Cap: int(n.vsize), + })) } // PageInfo represents human readable information about a page. diff --git a/vendor/github.com/etcd-io/bbolt/tx.go b/vendor/go.etcd.io/bbolt/tx.go index 2df7688c2..13937cdbf 100644 --- a/vendor/github.com/etcd-io/bbolt/tx.go +++ b/vendor/go.etcd.io/bbolt/tx.go @@ -4,6 +4,7 @@ import ( "fmt" "io" "os" + "reflect" "sort" "strings" "time" @@ -527,7 +528,7 @@ func (tx *Tx) write() error { offset := int64(p.id) * int64(tx.db.pageSize) // Write out page in "max allocation" sized chunks. - ptr := (*[maxAllocSize]byte)(unsafe.Pointer(p)) + ptr := uintptr(unsafe.Pointer(p)) for { // Limit our write to our max allocation size. sz := size @@ -536,7 +537,11 @@ func (tx *Tx) write() error { } // Write chunk to disk. - buf := ptr[:sz] + buf := *(*[]byte)(unsafe.Pointer(&reflect.SliceHeader{ + Data: ptr, + Len: sz, + Cap: sz, + })) if _, err := tx.db.ops.writeAt(buf, offset); err != nil { return err } @@ -552,7 +557,7 @@ func (tx *Tx) write() error { // Otherwise move offset forward and move pointer to next chunk. 
offset += int64(sz) - ptr = (*[maxAllocSize]byte)(unsafe.Pointer(&ptr[sz])) + ptr += uintptr(sz) } } @@ -571,7 +576,11 @@ func (tx *Tx) write() error { continue } - buf := (*[maxAllocSize]byte)(unsafe.Pointer(p))[:tx.db.pageSize] + buf := *(*[]byte)(unsafe.Pointer(&reflect.SliceHeader{ + Data: uintptr(unsafe.Pointer(p)), + Len: tx.db.pageSize, + Cap: tx.db.pageSize, + })) // See https://go.googlesource.com/go/+/f03c9202c43e0abb130669852082117ca50aa9b1 for i := range buf { diff --git a/vendor/modules.txt b/vendor/modules.txt index a01d5b1c6..1ebb9e1fc 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -88,7 +88,7 @@ github.com/containers/common/pkg/config github.com/containers/common/pkg/unshare # github.com/containers/conmon v2.0.10+incompatible github.com/containers/conmon/runner/config -# github.com/containers/image/v5 v5.2.1 +# github.com/containers/image/v5 v5.3.1 github.com/containers/image/v5/copy github.com/containers/image/v5/directory github.com/containers/image/v5/directory/explicitfilepath @@ -101,6 +101,7 @@ github.com/containers/image/v5/docker/tarfile github.com/containers/image/v5/image github.com/containers/image/v5/internal/iolimits github.com/containers/image/v5/internal/pkg/keyctl +github.com/containers/image/v5/internal/pkg/platform github.com/containers/image/v5/internal/tmpdir github.com/containers/image/v5/manifest github.com/containers/image/v5/oci/archive @@ -265,8 +266,6 @@ github.com/docker/libnetwork/types # github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96 github.com/docker/spdystream github.com/docker/spdystream/spdy -# github.com/etcd-io/bbolt v1.3.3 -github.com/etcd-io/bbolt # github.com/fsnotify/fsnotify v1.4.9 github.com/fsnotify/fsnotify # github.com/fsouza/go-dockerclient v1.6.3 @@ -504,7 +503,7 @@ github.com/uber/jaeger-client-go/transport github.com/uber/jaeger-client-go/utils # github.com/uber/jaeger-lib v2.2.0+incompatible github.com/uber/jaeger-lib/metrics -# github.com/ulikunitz/xz v0.5.6 +# 
github.com/ulikunitz/xz v0.5.7 github.com/ulikunitz/xz github.com/ulikunitz/xz/internal/hash github.com/ulikunitz/xz/internal/xlog @@ -517,7 +516,7 @@ github.com/varlink/go/varlink/idl github.com/vbatts/tar-split/archive/tar github.com/vbatts/tar-split/tar/asm github.com/vbatts/tar-split/tar/storage -# github.com/vbauerster/mpb/v4 v4.11.2 +# github.com/vbauerster/mpb/v4 v4.12.2 github.com/vbauerster/mpb/v4 github.com/vbauerster/mpb/v4/cwriter github.com/vbauerster/mpb/v4/decor @@ -533,6 +532,8 @@ github.com/xeipuuv/gojsonpointer github.com/xeipuuv/gojsonreference # github.com/xeipuuv/gojsonschema v0.0.0-20190816131739-be0936907f66 github.com/xeipuuv/gojsonschema +# go.etcd.io/bbolt v1.3.4 +go.etcd.io/bbolt # go.opencensus.io v0.22.0 go.opencensus.io go.opencensus.io/internal |