Diffstat (limited to 'vendor/github.com')
-rw-r--r--  vendor/github.com/containers/common/libimage/copier.go | 12
-rw-r--r--  vendor/github.com/containers/common/libimage/image.go | 8
-rw-r--r--  vendor/github.com/containers/common/libimage/import.go | 2
-rw-r--r--  vendor/github.com/containers/common/libimage/manifests/manifests.go | 18
-rw-r--r--  vendor/github.com/containers/common/libimage/pull.go | 80
-rw-r--r--  vendor/github.com/containers/common/libimage/search.go | 4
-rw-r--r--  vendor/github.com/containers/common/pkg/auth/auth.go | 153
-rw-r--r--  vendor/github.com/containers/common/pkg/auth/cli.go | 20
-rw-r--r--  vendor/github.com/containers/common/pkg/config/config.go | 11
-rw-r--r--  vendor/github.com/containers/common/pkg/config/pull_policy.go | 8
-rw-r--r--  vendor/github.com/containers/common/pkg/seccomp/types.go | 2
-rw-r--r--  vendor/github.com/containers/image/v5/copy/copy.go | 34
-rw-r--r--  vendor/github.com/containers/image/v5/directory/directory_dest.go | 26
-rw-r--r--  vendor/github.com/containers/image/v5/directory/directory_transport.go | 6
-rw-r--r--  vendor/github.com/containers/image/v5/docker/docker_client.go | 29
-rw-r--r--  vendor/github.com/containers/image/v5/docker/docker_image.go | 4
-rw-r--r--  vendor/github.com/containers/image/v5/docker/docker_image_dest.go | 16
-rw-r--r--  vendor/github.com/containers/image/v5/docker/docker_image_src.go | 15
-rw-r--r--  vendor/github.com/containers/image/v5/internal/blobinfocache/blobinfocache.go | 3
-rw-r--r--  vendor/github.com/containers/image/v5/manifest/common.go | 6
-rw-r--r--  vendor/github.com/containers/image/v5/manifest/docker_schema2.go | 14
-rw-r--r--  vendor/github.com/containers/image/v5/manifest/oci.go | 14
-rw-r--r--  vendor/github.com/containers/image/v5/oci/layout/oci_src.go | 4
-rw-r--r--  vendor/github.com/containers/image/v5/openshift/openshift.go | 9
-rw-r--r--  vendor/github.com/containers/image/v5/pkg/compression/compression.go | 20
-rw-r--r--  vendor/github.com/containers/image/v5/pkg/compression/internal/types.go | 2
-rw-r--r--  vendor/github.com/containers/image/v5/pkg/compression/types/types.go | 28
-rw-r--r--  vendor/github.com/containers/image/v5/pkg/docker/config/config.go | 16
-rw-r--r--  vendor/github.com/containers/image/v5/types/types.go | 2
-rw-r--r--  vendor/github.com/containers/image/v5/version/version.go | 2
-rw-r--r--  vendor/github.com/containers/storage/VERSION | 2
-rw-r--r--  vendor/github.com/containers/storage/drivers/quota/projectquota.go | 57
-rw-r--r--  vendor/github.com/dtylman/scp/.gitignore | 25
-rw-r--r--  vendor/github.com/dtylman/scp/LICENSE | 21
-rw-r--r--  vendor/github.com/dtylman/scp/README.md | 42
-rw-r--r--  vendor/github.com/dtylman/scp/msg.go | 121
-rw-r--r--  vendor/github.com/dtylman/scp/scp.go | 153
37 files changed, 785 insertions, 204 deletions
diff --git a/vendor/github.com/containers/common/libimage/copier.go b/vendor/github.com/containers/common/libimage/copier.go
index 34cc0d45d..a44f098ad 100644
--- a/vendor/github.com/containers/common/libimage/copier.go
+++ b/vendor/github.com/containers/common/libimage/copier.go
@@ -342,7 +342,7 @@ func (c *copier) copy(ctx context.Context, source, destination types.ImageRefere
}
}
- var copiedManifest []byte
+ var returnManifest []byte
f := func() error {
opts := c.imageCopyOptions
if sourceInsecure != nil {
@@ -354,11 +354,13 @@ func (c *copier) copy(ctx context.Context, source, destination types.ImageRefere
opts.DestinationCtx.DockerInsecureSkipTLSVerify = value
}
- var err error
- copiedManifest, err = copy.Image(ctx, c.policyContext, destination, source, &opts)
+ copiedManifest, err := copy.Image(ctx, c.policyContext, destination, source, &opts)
+ if err == nil {
+ returnManifest = copiedManifest
+ }
return err
}
- return copiedManifest, retry.RetryIfNecessary(ctx, f, &c.retryOptions)
+ return returnManifest, retry.RetryIfNecessary(ctx, f, &c.retryOptions)
}
// checkRegistrySourcesAllows checks the $BUILD_REGISTRY_SOURCES environment
@@ -369,7 +371,7 @@ func (c *copier) copy(ctx context.Context, source, destination types.ImageRefere
// If set, the insecure return value indicates whether the registry is set to
// be insecure.
//
-// NOTE: this functionality is required by Buildah.
+// NOTE: this functionality is required by Buildah for OpenShift.
func checkRegistrySourcesAllows(dest types.ImageReference) (insecure *bool, err error) {
registrySources, ok := os.LookupEnv("BUILD_REGISTRY_SOURCES")
if !ok || registrySources == "" {
diff --git a/vendor/github.com/containers/common/libimage/image.go b/vendor/github.com/containers/common/libimage/image.go
index 5b060a185..c47e63339 100644
--- a/vendor/github.com/containers/common/libimage/image.go
+++ b/vendor/github.com/containers/common/libimage/image.go
@@ -836,9 +836,9 @@ func (i *Image) Manifest(ctx context.Context) (rawManifest []byte, mimeType stri
return src.GetManifest(ctx, nil)
}
-// getImageDigest creates an image object and uses the hex value of the digest as the image ID
-// for parsing the store reference
-func getImageDigest(ctx context.Context, src types.ImageReference, sys *types.SystemContext) (string, error) {
+// getImageID creates an image object and uses the hex value of the config
+// blob's digest (if it has one) as the image ID for parsing the store reference
+func getImageID(ctx context.Context, src types.ImageReference, sys *types.SystemContext) (string, error) {
newImg, err := src.NewImage(ctx, sys)
if err != nil {
return "", err
@@ -852,5 +852,5 @@ func getImageDigest(ctx context.Context, src types.ImageReference, sys *types.Sy
if err = imageDigest.Validate(); err != nil {
return "", errors.Wrapf(err, "error getting config info")
}
- return "@" + imageDigest.Hex(), nil
+ return "@" + imageDigest.Encoded(), nil
}
diff --git a/vendor/github.com/containers/common/libimage/import.go b/vendor/github.com/containers/common/libimage/import.go
index 9926aaec7..bcfb4e129 100644
--- a/vendor/github.com/containers/common/libimage/import.go
+++ b/vendor/github.com/containers/common/libimage/import.go
@@ -86,7 +86,7 @@ func (r *Runtime) Import(ctx context.Context, path string, options *ImportOption
return "", err
}
- id, err := getImageDigest(ctx, srcRef, r.systemContextCopy())
+ id, err := getImageID(ctx, srcRef, r.systemContextCopy())
if err != nil {
return "", err
}
diff --git a/vendor/github.com/containers/common/libimage/manifests/manifests.go b/vendor/github.com/containers/common/libimage/manifests/manifests.go
index 875c2948d..81b5343c0 100644
--- a/vendor/github.com/containers/common/libimage/manifests/manifests.go
+++ b/vendor/github.com/containers/common/libimage/manifests/manifests.go
@@ -18,6 +18,7 @@ import (
"github.com/containers/image/v5/transports/alltransports"
"github.com/containers/image/v5/types"
"github.com/containers/storage"
+ "github.com/containers/storage/pkg/lockfile"
digest "github.com/opencontainers/go-digest"
v1 "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
@@ -395,3 +396,20 @@ func (l *list) Remove(instanceDigest digest.Digest) error {
}
return err
}
+
+// LockerForImage returns a Locker for a given image record. It's recommended
+// that processes which use LoadFromImage() to load a list from an image and
+// then use that list's SaveToImage() method to save a modified version of the
+// list to that image record use this lock to avoid accidentally wiping out
+// changes that another process is also attempting to make.
+func LockerForImage(store storage.Store, image string) (lockfile.Locker, error) {
+ img, err := store.Image(image)
+ if err != nil {
+ return nil, errors.Wrapf(err, "locating image %q for locating lock", image)
+ }
+ d := digest.NewDigestFromEncoded(digest.Canonical, img.ID)
+ if err := d.Validate(); err != nil {
+ return nil, errors.Wrapf(err, "coercing image ID for %q into a digest", image)
+ }
+ return store.GetDigestLock(d)
+}
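
A minimal usage sketch for the new LockerForImage() helper; the function name updateList, the store variable, and the listImageID argument are illustrative assumptions, not part of this diff.

package example

import (
	"github.com/containers/common/libimage/manifests"
	"github.com/containers/storage"
)

// updateList is a hypothetical caller: it takes the per-image lock returned by
// LockerForImage before re-reading and re-saving a manifest list stored in the
// image record, so concurrent writers do not clobber each other's changes.
func updateList(store storage.Store, listImageID string) error {
	locker, err := manifests.LockerForImage(store, listImageID)
	if err != nil {
		return err
	}
	locker.Lock()
	defer locker.Unlock()

	// While holding the lock: load the list from the image, modify it, and
	// save it back (e.g. via LoadFromImage() and the list's SaveToImage()).
	return nil
}
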
diff --git a/vendor/github.com/containers/common/libimage/pull.go b/vendor/github.com/containers/common/libimage/pull.go
index 1a6ad1ce2..97347178a 100644
--- a/vendor/github.com/containers/common/libimage/pull.go
+++ b/vendor/github.com/containers/common/libimage/pull.go
@@ -12,6 +12,7 @@ import (
dockerArchiveTransport "github.com/containers/image/v5/docker/archive"
dockerDaemonTransport "github.com/containers/image/v5/docker/daemon"
"github.com/containers/image/v5/docker/reference"
+ "github.com/containers/image/v5/manifest"
ociArchiveTransport "github.com/containers/image/v5/oci/archive"
ociTransport "github.com/containers/image/v5/oci/layout"
"github.com/containers/image/v5/pkg/shortnames"
@@ -19,6 +20,7 @@ import (
"github.com/containers/image/v5/transports/alltransports"
"github.com/containers/image/v5/types"
"github.com/containers/storage"
+ digest "github.com/opencontainers/go-digest"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -192,19 +194,19 @@ func (r *Runtime) copyFromDefault(ctx context.Context, ref types.ImageReference,
imageName = storageName
case ociArchiveTransport.Transport.Name():
- manifest, err := ociArchiveTransport.LoadManifestDescriptor(ref)
+ manifestDescriptor, err := ociArchiveTransport.LoadManifestDescriptor(ref)
if err != nil {
return nil, err
}
- // if index.json has no reference name, compute the image digest instead
- if manifest.Annotations == nil || manifest.Annotations["org.opencontainers.image.ref.name"] == "" {
- storageName, err = getImageDigest(ctx, ref, nil)
+ // if index.json has no reference name, compute the image ID instead
+ if manifestDescriptor.Annotations == nil || manifestDescriptor.Annotations["org.opencontainers.image.ref.name"] == "" {
+ storageName, err = getImageID(ctx, ref, nil)
if err != nil {
return nil, err
}
imageName = "sha256:" + storageName[1:]
} else {
- storageName = manifest.Annotations["org.opencontainers.image.ref.name"]
+ storageName = manifestDescriptor.Annotations["org.opencontainers.image.ref.name"]
named, err := NormalizeName(storageName)
if err != nil {
return nil, err
@@ -248,7 +250,7 @@ func (r *Runtime) storageReferencesReferencesFromArchiveReader(ctx context.Conte
var imageNames []string
if len(destNames) == 0 {
- destName, err := getImageDigest(ctx, readerRef, &r.systemContext)
+ destName, err := getImageID(ctx, readerRef, &r.systemContext)
if err != nil {
return nil, nil, err
}
@@ -316,8 +318,8 @@ func (r *Runtime) copyFromDockerArchiveReaderReference(ctx context.Context, read
}
// copyFromRegistry pulls the specified, possibly unqualified, name from a
-// registry. On successful pull it returns the used fully-qualified name that
-// can later be used to look up the image in the local containers storage.
+// registry. On successful pull it returns the ID of the image in local
+// storage.
//
// If options.All is set, all tags from the specified registry will be pulled.
func (r *Runtime) copyFromRegistry(ctx context.Context, ref types.ImageReference, inputName string, pullPolicy config.PullPolicy, options *PullOptions) ([]string, error) {
@@ -337,7 +339,7 @@ func (r *Runtime) copyFromRegistry(ctx context.Context, ref types.ImageReference
return nil, err
}
- pulledTags := []string{}
+ pulledIDs := []string{}
for _, tag := range tags {
select { // Let's be gentle with Podman remote.
case <-ctx.Done():
@@ -353,15 +355,54 @@ func (r *Runtime) copyFromRegistry(ctx context.Context, ref types.ImageReference
if err != nil {
return nil, err
}
- pulledTags = append(pulledTags, pulled...)
+ pulledIDs = append(pulledIDs, pulled...)
}
- return pulledTags, nil
+ return pulledIDs, nil
+}
+
+// imagesIDsForManifest() parses the manifest of the copied image and then looks
+// up the IDs of the matching image. There's a small slice of time, between
+// when we copy the image into local storage and when we go to look for it
+// using the name that we gave it when we copied it, when the name we wanted to
+// assign to the image could have been moved, but the image's ID will remain
+// the same until it is deleted.
+func (r *Runtime) imagesIDsForManifest(manifestBytes []byte, sys *types.SystemContext) ([]string, error) {
+ var imageDigest digest.Digest
+ manifestType := manifest.GuessMIMEType(manifestBytes)
+ if manifest.MIMETypeIsMultiImage(manifestType) {
+ list, err := manifest.ListFromBlob(manifestBytes, manifestType)
+ if err != nil {
+ return nil, errors.Wrapf(err, "parsing manifest list")
+ }
+ d, err := list.ChooseInstance(sys)
+ if err != nil {
+ return nil, errors.Wrapf(err, "choosing instance from manifest list")
+ }
+ imageDigest = d
+ } else {
+ d, err := manifest.Digest(manifestBytes)
+ if err != nil {
+ return nil, errors.Wrapf(err, "digesting manifest")
+ }
+ imageDigest = d
+ }
+ var results []string
+ images, err := r.store.ImagesByDigest(imageDigest)
+ if err != nil {
+ return nil, errors.Wrapf(err, "listing images by manifest digest")
+ }
+ for _, image := range images {
+ results = append(results, image.ID)
+ }
+ if len(results) == 0 {
+ return nil, errors.Wrapf(storage.ErrImageUnknown, "identifying new image by manifest digest")
+ }
+ return results, nil
}
// copySingleImageFromRegistry pulls the specified, possibly unqualified, name
-// from a registry. On successful pull it returns the used fully-qualified
-// name that can later be used to look up the image in the local containers
+// from a registry. On successful pull it returns the ID of the image in local
// storage.
func (r *Runtime) copySingleImageFromRegistry(ctx context.Context, imageName string, pullPolicy config.PullPolicy, options *PullOptions) ([]string, error) { //nolint:gocyclo
// Sanity check.
@@ -375,7 +416,7 @@ func (r *Runtime) copySingleImageFromRegistry(ctx context.Context, imageName str
err error
)
- // Always check if there's a local image. If, we should use it's
+ // Always check if there's a local image. If so, we should use its
// resolved name for pulling. Assume we're doing a `pull foo`.
// If there's already a local image "localhost/foo", then we should
// attempt pulling that instead of doing the full short-name dance.
@@ -454,7 +495,7 @@ func (r *Runtime) copySingleImageFromRegistry(ctx context.Context, imageName str
}
}
- // If we found a local image, we should use it's locally resolved name
+ // If we found a local image, we should use its locally resolved name
// (see containers/buildah/issues/2904). An exception is if a custom
// platform is specified (e.g., `--arch=arm64`). In that case, we need
// to pessimistically pull the image since some images declare wrong
@@ -462,7 +503,8 @@ func (r *Runtime) copySingleImageFromRegistry(ctx context.Context, imageName str
// containers/podman/issues/10682).
//
// In other words: multi-arch support can only be as good as the images
- // in the wild.
+ // in the wild, so we shouldn't break things for our users by trying to
+ // insist that they make sense.
if localImage != nil && !customPlatform {
if imageName != resolvedImageName {
logrus.Debugf("Image %s resolved to local image %s which will be used for pulling", imageName, resolvedImageName)
@@ -541,7 +583,8 @@ func (r *Runtime) copySingleImageFromRegistry(ctx context.Context, imageName str
return nil, err
}
}
- if _, err := c.copy(ctx, srcRef, destRef); err != nil {
+ var manifestBytes []byte
+ if manifestBytes, err = c.copy(ctx, srcRef, destRef); err != nil {
logrus.Debugf("Error pulling candidate %s: %v", candidateString, err)
pullErrors = append(pullErrors, err)
continue
@@ -554,6 +597,9 @@ func (r *Runtime) copySingleImageFromRegistry(ctx context.Context, imageName str
}
logrus.Debugf("Pulled candidate %s successfully", candidateString)
+ if ids, err := r.imagesIDsForManifest(manifestBytes, sys); err == nil {
+ return ids, nil
+ }
return []string{candidate.Value.String()}, nil
}
diff --git a/vendor/github.com/containers/common/libimage/search.go b/vendor/github.com/containers/common/libimage/search.go
index 4d1b842e7..df29bc7da 100644
--- a/vendor/github.com/containers/common/libimage/search.go
+++ b/vendor/github.com/containers/common/libimage/search.go
@@ -185,6 +185,10 @@ func (r *Runtime) searchImageInRegistry(ctx context.Context, term, registry stri
sys.DockerInsecureSkipTLSVerify = options.InsecureSkipTLSVerify
}
+ if options.Authfile != "" {
+ sys.AuthFilePath = options.Authfile
+ }
+
if options.ListTags {
results, err := searchRepositoryTags(ctx, sys, registry, term, options)
if err != nil {
diff --git a/vendor/github.com/containers/common/pkg/auth/auth.go b/vendor/github.com/containers/common/pkg/auth/auth.go
index d2e75c1f7..093da0299 100644
--- a/vendor/github.com/containers/common/pkg/auth/auth.go
+++ b/vendor/github.com/containers/common/pkg/auth/auth.go
@@ -9,6 +9,7 @@ import (
"strings"
"github.com/containers/image/v5/docker"
+ "github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/pkg/docker/config"
"github.com/containers/image/v5/pkg/sysregistriesv2"
"github.com/containers/image/v5/types"
@@ -69,30 +70,50 @@ func Login(ctx context.Context, systemContext *types.SystemContext, opts *LoginO
systemContext = systemContextWithOptions(systemContext, opts.AuthFile, opts.CertDir)
var (
- server string
- err error
+ authConfig types.DockerAuthConfig
+ key, registry string
+ ref reference.Named
+ err error
)
- if len(args) > 1 {
- return errors.New("login accepts only one registry to login to")
- }
- if len(args) == 0 {
+ l := len(args)
+ switch l {
+ case 0:
if !opts.AcceptUnspecifiedRegistry {
return errors.New("please provide a registry to login to")
}
- if server, err = defaultRegistryWhenUnspecified(systemContext); err != nil {
+ if key, err = defaultRegistryWhenUnspecified(systemContext); err != nil {
return err
}
- logrus.Debugf("registry not specified, default to the first registry %q from registries.conf", server)
- } else {
- server = getRegistryName(args[0])
+ registry = key
+ logrus.Debugf("registry not specified, default to the first registry %q from registries.conf", key)
+
+ case 1:
+ key, registry, ref, err = parseRegistryArgument(args[0], opts.AcceptRepositories)
+ if err != nil {
+ return err
+ }
+
+ default:
+ return errors.New("login accepts only one registry to login to")
+
}
- authConfig, err := config.GetCredentials(systemContext, server)
- if err != nil {
- return errors.Wrap(err, "reading auth file")
+
+ if ref != nil {
+ authConfig, err = config.GetCredentialsForRef(systemContext, ref)
+ if err != nil {
+ return errors.Wrap(err, "get credentials for repository")
+ }
+ } else {
+ // nolint: staticcheck
+ authConfig, err = config.GetCredentials(systemContext, registry)
+ if err != nil {
+ return errors.Wrap(err, "get credentials")
+ }
}
+
if opts.GetLoginSet {
if authConfig.Username == "" {
- return errors.Errorf("not logged into %s", server)
+ return errors.Errorf("not logged into %s", key)
}
fmt.Fprintf(opts.Stdout, "%s\n", authConfig.Username)
return nil
@@ -119,9 +140,9 @@ func Login(ctx context.Context, systemContext *types.SystemContext, opts *LoginO
// If no username and no password is specified, try to use existing ones.
if opts.Username == "" && password == "" && authConfig.Username != "" && authConfig.Password != "" {
- fmt.Println("Authenticating with existing credentials...")
- if err := docker.CheckAuth(ctx, systemContext, authConfig.Username, authConfig.Password, server); err == nil {
- fmt.Fprintln(opts.Stdout, "Existing credentials are valid. Already logged in to", server)
+ fmt.Fprintf(opts.Stdout, "Authenticating with existing credentials for %s\n", key)
+ if err := docker.CheckAuth(ctx, systemContext, authConfig.Username, authConfig.Password, registry); err == nil {
+ fmt.Fprintf(opts.Stdout, "Existing credentials are valid. Already logged in to %s\n", registry)
return nil
}
fmt.Fprintln(opts.Stdout, "Existing credentials are invalid, please enter valid username and password")
@@ -132,9 +153,9 @@ func Login(ctx context.Context, systemContext *types.SystemContext, opts *LoginO
return errors.Wrap(err, "getting username and password")
}
- if err = docker.CheckAuth(ctx, systemContext, username, password, server); err == nil {
+ if err = docker.CheckAuth(ctx, systemContext, username, password, registry); err == nil {
// Write the new credentials to the authfile
- desc, err := config.SetCredentials(systemContext, server, username, password)
+ desc, err := config.SetCredentials(systemContext, key, username, password)
if err != nil {
return err
}
@@ -147,10 +168,45 @@ func Login(ctx context.Context, systemContext *types.SystemContext, opts *LoginO
return nil
}
if unauthorized, ok := err.(docker.ErrUnauthorizedForCredentials); ok {
- logrus.Debugf("error logging into %q: %v", server, unauthorized)
- return errors.Errorf("error logging into %q: invalid username/password", server)
+ logrus.Debugf("error logging into %q: %v", key, unauthorized)
+ return errors.Errorf("error logging into %q: invalid username/password", key)
+ }
+ return errors.Wrapf(err, "authenticating creds for %q", key)
+}
+
+// parseRegistryArgument verifies the provided arg depending on whether we
+// accept repositories or not.
+func parseRegistryArgument(arg string, acceptRepositories bool) (key, registry string, maybeRef reference.Named, err error) {
+ if !acceptRepositories {
+ registry = getRegistryName(arg)
+ key = registry
+ return key, registry, maybeRef, nil
+ }
+
+ key = trimScheme(arg)
+ if key != arg {
+ return key, registry, nil, errors.New("credentials key has http[s]:// prefix")
}
- return errors.Wrapf(err, "authenticating creds for %q", server)
+
+ registry = getRegistryName(key)
+ if registry == key {
+ // We cannot parse a reference from a registry, so we stop here
+ return key, registry, nil, nil
+ }
+
+ ref, parseErr := reference.ParseNamed(key)
+ if parseErr != nil {
+ return key, registry, nil, errors.Wrapf(parseErr, "parse reference from %q", key)
+ }
+
+ if !reference.IsNameOnly(ref) {
+ return key, registry, nil, errors.Errorf("reference %q contains tag or digest", ref.String())
+ }
+
+ maybeRef = ref
+ registry = reference.Domain(ref)
+
+ return key, registry, maybeRef, nil
}
// getRegistryName scrubs and parses the input to get the server name
@@ -158,13 +214,21 @@ func getRegistryName(server string) string {
// removes 'http://' or 'https://' from the front of the
// server/registry string if either is there. This will be mostly used
// for user input from 'Buildah login' and 'Buildah logout'.
- server = strings.TrimPrefix(strings.TrimPrefix(server, "https://"), "http://")
+ server = trimScheme(server)
// gets the registry from the input. If the input is of the form
// quay.io/myuser/myimage, it will parse it and just return quay.io
split := strings.Split(server, "/")
return split[0]
}
+// trimScheme removes the HTTP(s) scheme from the provided repository.
+func trimScheme(repository string) string {
+ // removes 'http://' or 'https://' from the front of the
+ // server/registry string if either is there. This will be mostly used
+ // for user input from 'Buildah login' and 'Buildah logout'.
+ return strings.TrimPrefix(strings.TrimPrefix(repository, "https://"), "http://")
+}
+
// getUserAndPass gets the username and password from STDIN if not given
// using the -u and -p flags. If the username prompt is left empty, the
// displayed userFromAuthFile will be used instead.
@@ -209,8 +273,9 @@ func Logout(systemContext *types.SystemContext, opts *LogoutOptions, args []stri
systemContext = systemContextWithOptions(systemContext, opts.AuthFile, "")
var (
- server string
- err error
+ key, registry string
+ ref reference.Named
+ err error
)
if len(args) > 1 {
return errors.New("logout accepts only one registry to logout from")
@@ -219,16 +284,20 @@ func Logout(systemContext *types.SystemContext, opts *LogoutOptions, args []stri
if !opts.AcceptUnspecifiedRegistry {
return errors.New("please provide a registry to logout from")
}
- if server, err = defaultRegistryWhenUnspecified(systemContext); err != nil {
+ if key, err = defaultRegistryWhenUnspecified(systemContext); err != nil {
return err
}
- logrus.Debugf("registry not specified, default to the first registry %q from registries.conf", server)
+ registry = key
+ logrus.Debugf("registry not specified, default to the first registry %q from registries.conf", key)
}
if len(args) != 0 {
if opts.All {
return errors.New("--all takes no arguments")
}
- server = getRegistryName(args[0])
+ key, registry, ref, err = parseRegistryArgument(args[0], opts.AcceptRepositories)
+ if err != nil {
+ return err
+ }
}
if opts.All {
@@ -239,24 +308,34 @@ func Logout(systemContext *types.SystemContext, opts *LogoutOptions, args []stri
return nil
}
- err = config.RemoveAuthentication(systemContext, server)
+ err = config.RemoveAuthentication(systemContext, key)
switch errors.Cause(err) {
case nil:
- fmt.Fprintf(opts.Stdout, "Removed login credentials for %s\n", server)
+ fmt.Fprintf(opts.Stdout, "Removed login credentials for %s\n", key)
return nil
case config.ErrNotLoggedIn:
- authConfig, err := config.GetCredentials(systemContext, server)
- if err != nil {
- return errors.Wrap(err, "reading auth file")
+ var authConfig types.DockerAuthConfig
+ if ref != nil {
+ authConfig, err = config.GetCredentialsForRef(systemContext, ref)
+ if err != nil {
+ return errors.Wrap(err, "get credentials for repository")
+ }
+ } else {
+ // nolint: staticcheck
+ authConfig, err = config.GetCredentials(systemContext, registry)
+ if err != nil {
+ return errors.Wrap(err, "get credentials")
+ }
}
- authInvalid := docker.CheckAuth(context.Background(), systemContext, authConfig.Username, authConfig.Password, server)
+
+ authInvalid := docker.CheckAuth(context.Background(), systemContext, authConfig.Username, authConfig.Password, registry)
if authConfig.Username != "" && authConfig.Password != "" && authInvalid == nil {
- fmt.Printf("Not logged into %s with current tool. Existing credentials were established via docker login. Please use docker logout instead.\n", server)
+ fmt.Printf("Not logged into %s with current tool. Existing credentials were established via docker login. Please use docker logout instead.\n", key)
return nil
}
- return errors.Errorf("Not logged into %s\n", server)
+ return errors.Errorf("Not logged into %s\n", key)
default:
- return errors.Wrapf(err, "logging out of %q", server)
+ return errors.Wrapf(err, "logging out of %q", key)
}
}
diff --git a/vendor/github.com/containers/common/pkg/auth/cli.go b/vendor/github.com/containers/common/pkg/auth/cli.go
index 5a7c1137c..7266bf48b 100644
--- a/vendor/github.com/containers/common/pkg/auth/cli.go
+++ b/vendor/github.com/containers/common/pkg/auth/cli.go
@@ -14,13 +14,14 @@ type LoginOptions struct {
// CLI flags managed by the FlagSet returned by GetLoginFlags
// Callers that use GetLoginFlags should not need to touch these values at all; callers that use
// other CLI frameworks should set them based on user input.
- AuthFile string
- CertDir string
- Password string
- Username string
- StdinPassword bool
- GetLoginSet bool
- Verbose bool // set to true for verbose output
+ AuthFile string
+ CertDir string
+ Password string
+ Username string
+ StdinPassword bool
+ GetLoginSet bool
+ Verbose bool // set to true for verbose output
+ AcceptRepositories bool // set to true to allow namespaces or repositories rather than just registries
// Options caller can set
Stdin io.Reader // set to os.Stdin
Stdout io.Writer // set to os.Stdout
@@ -32,8 +33,9 @@ type LogoutOptions struct {
// CLI flags managed by the FlagSet returned by GetLogoutFlags
// Callers that use GetLogoutFlags should not need to touch these values at all; callers that use
// other CLI frameworks should set them based on user input.
- AuthFile string
- All bool
+ AuthFile string
+ All bool
+ AcceptRepositories bool // set to true to allow namespaces or repositories rather than just registries
// Options caller can set
Stdout io.Writer // set to os.Stdout
AcceptUnspecifiedRegistry bool // set to true if allows logout with unspecified registry
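
A hedged sketch of how a caller might use the new AcceptRepositories flag; the function name loginToRepository and the example credentials/repository are assumptions for illustration only.

package example

import (
	"context"
	"os"

	"github.com/containers/common/pkg/auth"
	"github.com/containers/image/v5/types"
)

// loginToRepository logs in with a repository-scoped key: with
// AcceptRepositories set, the single argument may be a namespace or repository
// such as "quay.io/myuser/myimage", and the credentials are then stored under
// that key rather than under the bare registry name.
func loginToRepository(ctx context.Context) error {
	opts := auth.LoginOptions{
		Username:           "myuser",
		Password:           "secret",
		AcceptRepositories: true,
		Stdin:              os.Stdin,
		Stdout:             os.Stdout,
	}
	return auth.Login(ctx, &types.SystemContext{}, &opts, []string{"quay.io/myuser/myimage"})
}
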
diff --git a/vendor/github.com/containers/common/pkg/config/config.go b/vendor/github.com/containers/common/pkg/config/config.go
index 84876026d..008cfb642 100644
--- a/vendor/github.com/containers/common/pkg/config/config.go
+++ b/vendor/github.com/containers/common/pkg/config/config.go
@@ -637,9 +637,14 @@ func (c *Config) CheckCgroupsAndAdjustConfig() {
session := os.Getenv("DBUS_SESSION_BUS_ADDRESS")
hasSession := session != ""
- if hasSession && strings.HasPrefix(session, "unix:path=") {
- _, err := os.Stat(strings.TrimPrefix(session, "unix:path="))
- hasSession = err == nil
+ if hasSession {
+ for _, part := range strings.Split(session, ",") {
+ if strings.HasPrefix(part, "unix:path=") {
+ _, err := os.Stat(strings.TrimPrefix(part, "unix:path="))
+ hasSession = err == nil
+ break
+ }
+ }
}
if !hasSession && unshare.GetRootlessUID() != 0 {
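
For clarity, a standalone restatement of the adjusted session check, assuming the surrounding logic is otherwise unchanged; the helper name hasUsableDBusSession is hypothetical.

package example

import (
	"os"
	"strings"
)

// hasUsableDBusSession mirrors the new behavior: DBUS_SESSION_BUS_ADDRESS may
// contain several comma-separated addresses, so each part is checked for a
// "unix:path=" socket, and the first such part decides the result.
func hasUsableDBusSession() bool {
	session := os.Getenv("DBUS_SESSION_BUS_ADDRESS")
	if session == "" {
		return false
	}
	for _, part := range strings.Split(session, ",") {
		if strings.HasPrefix(part, "unix:path=") {
			_, err := os.Stat(strings.TrimPrefix(part, "unix:path="))
			return err == nil
		}
	}
	return true
}
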
diff --git a/vendor/github.com/containers/common/pkg/config/pull_policy.go b/vendor/github.com/containers/common/pkg/config/pull_policy.go
index 7c32dd660..8c1f0ec29 100644
--- a/vendor/github.com/containers/common/pkg/config/pull_policy.go
+++ b/vendor/github.com/containers/common/pkg/config/pull_policy.go
@@ -76,13 +76,13 @@ func (p PullPolicy) Validate() error {
// * "never" <-> PullPolicyNever
func ParsePullPolicy(s string) (PullPolicy, error) {
switch s {
- case "always":
+ case "always", "Always":
return PullPolicyAlways, nil
- case "missing", "ifnotpresent", "":
+ case "missing", "Missing", "ifnotpresent", "IfNotPresent", "":
return PullPolicyMissing, nil
- case "newer", "ifnewer":
+ case "newer", "Newer", "ifnewer", "IfNewer":
return PullPolicyNewer, nil
- case "never":
+ case "never", "Never":
return PullPolicyNever, nil
default:
return PullPolicyUnsupported, errors.Errorf("unsupported pull policy %q", s)
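
An illustrative snippet (not part of this diff) showing that ParsePullPolicy now accepts the Kubernetes-style capitalized spellings alongside the lowercase ones.

package main

import (
	"fmt"

	"github.com/containers/common/pkg/config"
)

func main() {
	for _, s := range []string{"always", "Always", "IfNotPresent", "never", "bogus"} {
		policy, err := config.ParsePullPolicy(s)
		if err != nil {
			fmt.Printf("%q -> unsupported: %v\n", s, err)
			continue
		}
		fmt.Printf("%q -> %v\n", s, policy)
	}
}
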
diff --git a/vendor/github.com/containers/common/pkg/seccomp/types.go b/vendor/github.com/containers/common/pkg/seccomp/types.go
index 36712458a..07751f729 100644
--- a/vendor/github.com/containers/common/pkg/seccomp/types.go
+++ b/vendor/github.com/containers/common/pkg/seccomp/types.go
@@ -7,7 +7,7 @@ package seccomp
// Seccomp represents the config for a seccomp profile for syscall restriction.
type Seccomp struct {
DefaultAction Action `json:"defaultAction"`
- DefaultErrnoRet *uint `json:"defaultErrnoRet"`
+ DefaultErrnoRet *uint `json:"defaultErrnoRet,omitempty"`
// Architectures is kept to maintain backward compatibility with the old
// seccomp profile.
Architectures []Arch `json:"architectures,omitempty"`
diff --git a/vendor/github.com/containers/image/v5/copy/copy.go b/vendor/github.com/containers/image/v5/copy/copy.go
index 9bc479d23..b4ff8aa10 100644
--- a/vendor/github.com/containers/image/v5/copy/copy.go
+++ b/vendor/github.com/containers/image/v5/copy/copy.go
@@ -20,6 +20,7 @@ import (
"github.com/containers/image/v5/manifest"
"github.com/containers/image/v5/pkg/blobinfocache"
"github.com/containers/image/v5/pkg/compression"
+ compressiontypes "github.com/containers/image/v5/pkg/compression/types"
"github.com/containers/image/v5/signature"
"github.com/containers/image/v5/transports"
"github.com/containers/image/v5/types"
@@ -57,7 +58,7 @@ var compressionBufferSize = 1048576
// expectedCompressionFormats is used to check if a blob with a specified media type is compressed
// using the algorithm that the media type says it should be compressed with
-var expectedCompressionFormats = map[string]*compression.Algorithm{
+var expectedCompressionFormats = map[string]*compressiontypes.Algorithm{
imgspecv1.MediaTypeImageLayerGzip: &compression.Gzip,
imgspecv1.MediaTypeImageLayerZstd: &compression.Zstd,
manifest.DockerV2Schema2LayerMediaType: &compression.Gzip,
@@ -117,13 +118,12 @@ type copier struct {
progress chan types.ProgressProperties
blobInfoCache internalblobinfocache.BlobInfoCache2
copyInParallel bool
- compressionFormat compression.Algorithm
+ compressionFormat compressiontypes.Algorithm
compressionLevel *int
ociDecryptConfig *encconfig.DecryptConfig
ociEncryptConfig *encconfig.EncryptConfig
maxParallelDownloads uint
downloadForeignLayers bool
- fetchPartialBlobs bool
}
// imageCopier tracks state specific to a single image (possibly an item of a manifest list)
@@ -207,9 +207,6 @@ type Options struct {
// Download layer contents with "nondistributable" media types ("foreign" layers) and translate the layer media type
// to not indicate "nondistributable".
DownloadForeignLayers bool
-
- // FetchPartialBlobs indicates whether to attempt to fetch the blob partially. Experimental.
- FetchPartialBlobs bool
}
// validateImageListSelection returns an error if the passed-in value is not one that we recognize as a valid ImageListSelection value
@@ -290,15 +287,10 @@ func Image(ctx context.Context, policyContext *signature.PolicyContext, destRef,
ociEncryptConfig: options.OciEncryptConfig,
maxParallelDownloads: options.MaxParallelDownloads,
downloadForeignLayers: options.DownloadForeignLayers,
- fetchPartialBlobs: options.FetchPartialBlobs,
}
// Default to using gzip compression unless specified otherwise.
if options.DestinationCtx == nil || options.DestinationCtx.CompressionFormat == nil {
- algo, err := compression.AlgorithmByName("gzip")
- if err != nil {
- return nil, err
- }
- c.compressionFormat = algo
+ c.compressionFormat = compression.Gzip
} else {
c.compressionFormat = *options.DestinationCtx.CompressionFormat
}
@@ -1286,7 +1278,7 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, to
// the destination has support for it.
imgSource, okSource := ic.c.rawSource.(internalTypes.ImageSourceSeekable)
imgDest, okDest := ic.c.dest.(internalTypes.ImageDestinationPartial)
- if ic.c.fetchPartialBlobs && okSource && okDest && !diffIDIsNeeded {
+ if okSource && okDest && !diffIDIsNeeded {
bar := ic.c.createProgressBar(pool, true, srcInfo, "blob", "done")
progress := make(chan int64)
@@ -1320,7 +1312,7 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, to
return info, cachedDiffID, nil
}
bar.Abort(true)
- logrus.Errorf("Failed to retrieve partial blob: %v", err)
+ logrus.Debugf("Failed to retrieve partial blob: %v", err)
}
// Fallback: copy the layer, computing the diffID if we need to do so
@@ -1364,7 +1356,7 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, to
// and returns a complete blobInfo of the copied blob and perhaps a <-chan diffIDResult if diffIDIsNeeded, to be read by the caller.
func (ic *imageCopier) copyLayerFromStream(ctx context.Context, srcStream io.Reader, srcInfo types.BlobInfo,
diffIDIsNeeded bool, toEncrypt bool, bar *mpb.Bar, layerIndex int, emptyLayer bool) (types.BlobInfo, <-chan diffIDResult, error) {
- var getDiffIDRecorder func(compression.DecompressorFunc) io.Writer // = nil
+ var getDiffIDRecorder func(compressiontypes.DecompressorFunc) io.Writer // = nil
var diffIDChan chan diffIDResult
err := errors.New("Internal error: unexpected panic in copyLayer") // For pipeWriter.CloseWithbelow
@@ -1375,7 +1367,7 @@ func (ic *imageCopier) copyLayerFromStream(ctx context.Context, srcStream io.Rea
_ = pipeWriter.CloseWithError(err) // CloseWithError(nil) is equivalent to Close(), always returns nil
}()
- getDiffIDRecorder = func(decompressor compression.DecompressorFunc) io.Writer {
+ getDiffIDRecorder = func(decompressor compressiontypes.DecompressorFunc) io.Writer {
// If this fails, e.g. because we have exited and due to pipeWriter.CloseWithError() above further
// reading from the pipe has failed, we don’t really care.
// We only read from diffIDChan if the rest of the flow has succeeded, and when we do read from it,
@@ -1394,7 +1386,7 @@ func (ic *imageCopier) copyLayerFromStream(ctx context.Context, srcStream io.Rea
}
// diffIDComputationGoroutine reads all input from layerStream, uncompresses using decompressor if necessary, and sends its digest, and status, if any, to dest.
-func diffIDComputationGoroutine(dest chan<- diffIDResult, layerStream io.ReadCloser, decompressor compression.DecompressorFunc) {
+func diffIDComputationGoroutine(dest chan<- diffIDResult, layerStream io.ReadCloser, decompressor compressiontypes.DecompressorFunc) {
result := diffIDResult{
digest: "",
err: errors.New("Internal error: unexpected panic in diffIDComputationGoroutine"),
@@ -1406,7 +1398,7 @@ func diffIDComputationGoroutine(dest chan<- diffIDResult, layerStream io.ReadClo
}
// computeDiffID reads all input from layerStream, uncompresses it using decompressor if necessary, and returns its digest.
-func computeDiffID(stream io.Reader, decompressor compression.DecompressorFunc) (digest.Digest, error) {
+func computeDiffID(stream io.Reader, decompressor compressiontypes.DecompressorFunc) (digest.Digest, error) {
if decompressor != nil {
s, err := decompressor(stream)
if err != nil {
@@ -1439,7 +1431,7 @@ func (r errorAnnotationReader) Read(b []byte) (n int, err error) {
// perhaps (de/re/)compressing it if canModifyBlob,
// and returns a complete blobInfo of the copied blob.
func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, srcInfo types.BlobInfo,
- getOriginalLayerCopyWriter func(decompressor compression.DecompressorFunc) io.Writer,
+ getOriginalLayerCopyWriter func(decompressor compressiontypes.DecompressorFunc) io.Writer,
canModifyBlob bool, isConfig bool, toEncrypt bool, bar *mpb.Bar, layerIndex int, emptyLayer bool) (types.BlobInfo, error) {
if isConfig { // This is guaranteed by the caller, but set it here to be explicit.
canModifyBlob = false
@@ -1733,7 +1725,7 @@ func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, sr
}
// doCompression reads all input from src and writes its compressed equivalent to dest.
-func doCompression(dest io.Writer, src io.Reader, metadata map[string]string, compressionFormat compression.Algorithm, compressionLevel *int) error {
+func doCompression(dest io.Writer, src io.Reader, metadata map[string]string, compressionFormat compressiontypes.Algorithm, compressionLevel *int) error {
compressor, err := compression.CompressStreamWithMetadata(dest, metadata, compressionFormat, compressionLevel)
if err != nil {
return err
@@ -1751,7 +1743,7 @@ func doCompression(dest io.Writer, src io.Reader, metadata map[string]string, co
}
// compressGoroutine reads all input from src and writes its compressed equivalent to dest.
-func (c *copier) compressGoroutine(dest *io.PipeWriter, src io.Reader, metadata map[string]string, compressionFormat compression.Algorithm) {
+func (c *copier) compressGoroutine(dest *io.PipeWriter, src io.Reader, metadata map[string]string, compressionFormat compressiontypes.Algorithm) {
err := errors.New("Internal error: unexpected panic in compressGoroutine")
defer func() { // Note that this is not the same as {defer dest.CloseWithError(err)}; we need err to be evaluated lazily.
_ = dest.CloseWithError(err) // CloseWithError(nil) is equivalent to Close(), always returns nil
diff --git a/vendor/github.com/containers/image/v5/directory/directory_dest.go b/vendor/github.com/containers/image/v5/directory/directory_dest.go
index 49957ac4e..e3280aa2b 100644
--- a/vendor/github.com/containers/image/v5/directory/directory_dest.go
+++ b/vendor/github.com/containers/image/v5/directory/directory_dest.go
@@ -21,13 +21,26 @@ const version = "Directory Transport Version: 1.1\n"
var ErrNotContainerImageDir = errors.New("not a containers image directory, don't want to overwrite important data")
type dirImageDestination struct {
- ref dirReference
- compress bool
+ ref dirReference
+ desiredLayerCompression types.LayerCompression
}
// newImageDestination returns an ImageDestination for writing to a directory.
-func newImageDestination(ref dirReference, compress bool) (types.ImageDestination, error) {
- d := &dirImageDestination{ref: ref, compress: compress}
+func newImageDestination(sys *types.SystemContext, ref dirReference) (types.ImageDestination, error) {
+ desiredLayerCompression := types.PreserveOriginal
+ if sys != nil {
+ if sys.DirForceCompress {
+ desiredLayerCompression = types.Compress
+
+ if sys.DirForceDecompress {
+ return nil, errors.Errorf("Cannot compress and decompress at the same time")
+ }
+ }
+ if sys.DirForceDecompress {
+ desiredLayerCompression = types.Decompress
+ }
+ }
+ d := &dirImageDestination{ref: ref, desiredLayerCompression: desiredLayerCompression}
// If directory exists check if it is empty
// if not empty, check whether the contents match that of a container image directory and overwrite the contents
@@ -101,10 +114,7 @@ func (d *dirImageDestination) SupportsSignatures(ctx context.Context) error {
}
func (d *dirImageDestination) DesiredLayerCompression() types.LayerCompression {
- if d.compress {
- return types.Compress
- }
- return types.PreserveOriginal
+ return d.desiredLayerCompression
}
// AcceptsForeignLayerURLs returns false iff foreign layers in manifest should be actually
diff --git a/vendor/github.com/containers/image/v5/directory/directory_transport.go b/vendor/github.com/containers/image/v5/directory/directory_transport.go
index adfec6ef3..e542d888c 100644
--- a/vendor/github.com/containers/image/v5/directory/directory_transport.go
+++ b/vendor/github.com/containers/image/v5/directory/directory_transport.go
@@ -153,11 +153,7 @@ func (ref dirReference) NewImageSource(ctx context.Context, sys *types.SystemCon
// NewImageDestination returns a types.ImageDestination for this reference.
// The caller must call .Close() on the returned ImageDestination.
func (ref dirReference) NewImageDestination(ctx context.Context, sys *types.SystemContext) (types.ImageDestination, error) {
- compress := false
- if sys != nil {
- compress = sys.DirForceCompress
- }
- return newImageDestination(ref, compress)
+ return newImageDestination(sys, ref)
}
// DeleteImage deletes the named image from the registry, if supported.
diff --git a/vendor/github.com/containers/image/v5/docker/docker_client.go b/vendor/github.com/containers/image/v5/docker/docker_client.go
index 14c11dfd0..3fe9a11d0 100644
--- a/vendor/github.com/containers/image/v5/docker/docker_client.go
+++ b/vendor/github.com/containers/image/v5/docker/docker_client.go
@@ -304,7 +304,7 @@ func CheckAuth(ctx context.Context, sys *types.SystemContext, username, password
Password: password,
}
- resp, err := client.makeRequest(ctx, "GET", "/v2/", nil, nil, v2Auth, nil)
+ resp, err := client.makeRequest(ctx, http.MethodGet, "/v2/", nil, nil, v2Auth, nil)
if err != nil {
return err
}
@@ -343,8 +343,8 @@ func SearchRegistry(ctx context.Context, sys *types.SystemContext, registry, ima
v1Res := &V1Results{}
// Get credentials from authfile for the underlying hostname
- // lint:ignore SA1019 We can't use GetCredentialsForRef because we want to search the whole registry.
- auth, err := config.GetCredentials(sys, registry) // nolint:staticcheck // https://github.com/golangci/golangci-lint/issues/741
+ // We can't use GetCredentialsForRef here because we want to search the whole registry.
+ auth, err := config.GetCredentials(sys, registry)
if err != nil {
return nil, errors.Wrapf(err, "getting username and password")
}
@@ -380,7 +380,7 @@ func SearchRegistry(ctx context.Context, sys *types.SystemContext, registry, ima
u.RawQuery = q.Encode()
logrus.Debugf("trying to talk to v1 search endpoint")
- resp, err := client.makeRequest(ctx, "GET", u.String(), nil, nil, noAuth, nil)
+ resp, err := client.makeRequest(ctx, http.MethodGet, u.String(), nil, nil, noAuth, nil)
if err != nil {
logrus.Debugf("error getting search results from v1 endpoint %q: %v", registry, err)
} else {
@@ -400,14 +400,15 @@ func SearchRegistry(ctx context.Context, sys *types.SystemContext, registry, ima
searchRes := []SearchResult{}
path := "/v2/_catalog"
for len(searchRes) < limit {
- resp, err := client.makeRequest(ctx, "GET", path, nil, nil, v2Auth, nil)
+ resp, err := client.makeRequest(ctx, http.MethodGet, path, nil, nil, v2Auth, nil)
if err != nil {
logrus.Debugf("error getting search results from v2 endpoint %q: %v", registry, err)
return nil, errors.Wrapf(err, "couldn't search registry %q", registry)
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
- logrus.Errorf("error getting search results from v2 endpoint %q: %v", registry, httpResponseToError(resp, ""))
+ err := httpResponseToError(resp, "")
+ logrus.Errorf("error getting search results from v2 endpoint %q: %v", registry, err)
return nil, errors.Wrapf(err, "couldn't search registry %q", registry)
}
v2Res := &V2Results{}
@@ -533,11 +534,10 @@ func (c *dockerClient) makeRequestToResolvedURL(ctx context.Context, method, url
// makeRequest should generally be preferred.
// Note that no exponential back off is performed when receiving an http 429 status code.
func (c *dockerClient) makeRequestToResolvedURLOnce(ctx context.Context, method, url string, headers map[string][]string, stream io.Reader, streamLen int64, auth sendAuth, extraScope *authScope) (*http.Response, error) {
- req, err := http.NewRequest(method, url, stream)
+ req, err := http.NewRequestWithContext(ctx, method, url, stream)
if err != nil {
return nil, err
}
- req = req.WithContext(ctx)
if streamLen != -1 { // Do not blindly overwrite if streamLen == -1, http.NewRequest above can figure out the length of bytes.Reader and similar objects without us having to compute it.
req.ContentLength = streamLen
}
@@ -630,13 +630,11 @@ func (c *dockerClient) getBearerTokenOAuth2(ctx context.Context, challenge chall
return nil, errors.Errorf("missing realm in bearer auth challenge")
}
- authReq, err := http.NewRequest(http.MethodPost, realm, nil)
+ authReq, err := http.NewRequestWithContext(ctx, http.MethodPost, realm, nil)
if err != nil {
return nil, err
}
- authReq = authReq.WithContext(ctx)
-
// Make the form data required against the oauth2 authentication
// More details here: https://docs.docker.com/registry/spec/auth/oauth/
params := authReq.URL.Query()
@@ -680,12 +678,11 @@ func (c *dockerClient) getBearerToken(ctx context.Context, challenge challenge,
return nil, errors.Errorf("missing realm in bearer auth challenge")
}
- authReq, err := http.NewRequest(http.MethodGet, realm, nil)
+ authReq, err := http.NewRequestWithContext(ctx, http.MethodGet, realm, nil)
if err != nil {
return nil, err
}
- authReq = authReq.WithContext(ctx)
params := authReq.URL.Query()
if c.auth.Username != "" {
params.Add("account", c.auth.Username)
@@ -739,7 +736,7 @@ func (c *dockerClient) detectPropertiesHelper(ctx context.Context) error {
ping := func(scheme string) error {
url := fmt.Sprintf(resolvedPingV2URL, scheme, c.registry)
- resp, err := c.makeRequestToResolvedURL(ctx, "GET", url, nil, nil, -1, noAuth, nil)
+ resp, err := c.makeRequestToResolvedURL(ctx, http.MethodGet, url, nil, nil, -1, noAuth, nil)
if err != nil {
logrus.Debugf("Ping %s err %s (%#v)", url, err.Error(), err)
return err
@@ -766,7 +763,7 @@ func (c *dockerClient) detectPropertiesHelper(ctx context.Context) error {
// best effort to understand if we're talking to a V1 registry
pingV1 := func(scheme string) bool {
url := fmt.Sprintf(resolvedPingV1URL, scheme, c.registry)
- resp, err := c.makeRequestToResolvedURL(ctx, "GET", url, nil, nil, -1, noAuth, nil)
+ resp, err := c.makeRequestToResolvedURL(ctx, http.MethodGet, url, nil, nil, -1, noAuth, nil)
if err != nil {
logrus.Debugf("Ping %s err %s (%#v)", url, err.Error(), err)
return false
@@ -800,7 +797,7 @@ func (c *dockerClient) detectProperties(ctx context.Context) error {
// using the original data structures.
func (c *dockerClient) getExtensionsSignatures(ctx context.Context, ref dockerReference, manifestDigest digest.Digest) (*extensionSignatureList, error) {
path := fmt.Sprintf(extensionsSignaturePath, reference.Path(ref.ref), manifestDigest)
- res, err := c.makeRequest(ctx, "GET", path, nil, nil, v2Auth, nil)
+ res, err := c.makeRequest(ctx, http.MethodGet, path, nil, nil, v2Auth, nil)
if err != nil {
return nil, err
}
diff --git a/vendor/github.com/containers/image/v5/docker/docker_image.go b/vendor/github.com/containers/image/v5/docker/docker_image.go
index 567a4bcf4..c84bb37d2 100644
--- a/vendor/github.com/containers/image/v5/docker/docker_image.go
+++ b/vendor/github.com/containers/image/v5/docker/docker_image.go
@@ -68,7 +68,7 @@ func GetRepositoryTags(ctx context.Context, sys *types.SystemContext, ref types.
tags := make([]string, 0)
for {
- res, err := client.makeRequest(ctx, "GET", path, nil, nil, v2Auth, nil)
+ res, err := client.makeRequest(ctx, http.MethodGet, path, nil, nil, v2Auth, nil)
if err != nil {
return nil, err
}
@@ -134,7 +134,7 @@ func GetDigest(ctx context.Context, sys *types.SystemContext, ref types.ImageRef
"Accept": manifest.DefaultRequestedManifestMIMETypes,
}
- res, err := client.makeRequest(ctx, "HEAD", path, headers, nil, v2Auth, nil)
+ res, err := client.makeRequest(ctx, http.MethodHead, path, headers, nil, v2Auth, nil)
if err != nil {
return "", err
}
diff --git a/vendor/github.com/containers/image/v5/docker/docker_image_dest.go b/vendor/github.com/containers/image/v5/docker/docker_image_dest.go
index 84694e157..360a7122e 100644
--- a/vendor/github.com/containers/image/v5/docker/docker_image_dest.go
+++ b/vendor/github.com/containers/image/v5/docker/docker_image_dest.go
@@ -147,7 +147,7 @@ func (d *dockerImageDestination) PutBlob(ctx context.Context, stream io.Reader,
// FIXME? Chunked upload, progress reporting, etc.
uploadPath := fmt.Sprintf(blobUploadPath, reference.Path(d.ref.ref))
logrus.Debugf("Uploading %s", uploadPath)
- res, err := d.c.makeRequest(ctx, "POST", uploadPath, nil, nil, v2Auth, nil)
+ res, err := d.c.makeRequest(ctx, http.MethodPost, uploadPath, nil, nil, v2Auth, nil)
if err != nil {
return types.BlobInfo{}, err
}
@@ -168,7 +168,7 @@ func (d *dockerImageDestination) PutBlob(ctx context.Context, stream io.Reader,
// This error text should never be user-visible, we terminate only after makeRequestToResolvedURL
// returns, so there isn’t a way for the error text to be provided to any of our callers.
defer uploadReader.Terminate(errors.New("Reading data from an already terminated upload"))
- res, err = d.c.makeRequestToResolvedURL(ctx, "PATCH", uploadLocation.String(), map[string][]string{"Content-Type": {"application/octet-stream"}}, uploadReader, inputInfo.Size, v2Auth, nil)
+ res, err = d.c.makeRequestToResolvedURL(ctx, http.MethodPatch, uploadLocation.String(), map[string][]string{"Content-Type": {"application/octet-stream"}}, uploadReader, inputInfo.Size, v2Auth, nil)
if err != nil {
logrus.Debugf("Error uploading layer chunked %v", err)
return nil, err
@@ -194,7 +194,7 @@ func (d *dockerImageDestination) PutBlob(ctx context.Context, stream io.Reader,
// TODO: check inputInfo.Digest == computedDigest https://github.com/containers/image/pull/70#discussion_r77646717
locationQuery.Set("digest", computedDigest.String())
uploadLocation.RawQuery = locationQuery.Encode()
- res, err = d.c.makeRequestToResolvedURL(ctx, "PUT", uploadLocation.String(), map[string][]string{"Content-Type": {"application/octet-stream"}}, nil, -1, v2Auth, nil)
+ res, err = d.c.makeRequestToResolvedURL(ctx, http.MethodPut, uploadLocation.String(), map[string][]string{"Content-Type": {"application/octet-stream"}}, nil, -1, v2Auth, nil)
if err != nil {
return types.BlobInfo{}, err
}
@@ -215,7 +215,7 @@ func (d *dockerImageDestination) PutBlob(ctx context.Context, stream io.Reader,
func (d *dockerImageDestination) blobExists(ctx context.Context, repo reference.Named, digest digest.Digest, extraScope *authScope) (bool, int64, error) {
checkPath := fmt.Sprintf(blobsPath, reference.Path(repo), digest.String())
logrus.Debugf("Checking %s", checkPath)
- res, err := d.c.makeRequest(ctx, "HEAD", checkPath, nil, nil, v2Auth, extraScope)
+ res, err := d.c.makeRequest(ctx, http.MethodHead, checkPath, nil, nil, v2Auth, extraScope)
if err != nil {
return false, -1, err
}
@@ -246,7 +246,7 @@ func (d *dockerImageDestination) mountBlob(ctx context.Context, srcRepo referenc
}
mountPath := u.String()
logrus.Debugf("Trying to mount %s", mountPath)
- res, err := d.c.makeRequest(ctx, "POST", mountPath, nil, nil, v2Auth, extraScope)
+ res, err := d.c.makeRequest(ctx, http.MethodPost, mountPath, nil, nil, v2Auth, extraScope)
if err != nil {
return err
}
@@ -264,7 +264,7 @@ func (d *dockerImageDestination) mountBlob(ctx context.Context, srcRepo referenc
return errors.Wrap(err, "determining upload URL after a mount attempt")
}
logrus.Debugf("... started an upload instead of mounting, trying to cancel at %s", uploadLocation.String())
- res2, err := d.c.makeRequestToResolvedURL(ctx, "DELETE", uploadLocation.String(), nil, nil, -1, v2Auth, extraScope)
+ res2, err := d.c.makeRequestToResolvedURL(ctx, http.MethodDelete, uploadLocation.String(), nil, nil, -1, v2Auth, extraScope)
if err != nil {
logrus.Debugf("Error trying to cancel an inadvertent upload: %s", err)
} else {
@@ -424,7 +424,7 @@ func (d *dockerImageDestination) PutManifest(ctx context.Context, m []byte, inst
if mimeType != "" {
headers["Content-Type"] = []string{mimeType}
}
- res, err := d.c.makeRequest(ctx, "PUT", path, headers, bytes.NewReader(m), v2Auth, nil)
+ res, err := d.c.makeRequest(ctx, http.MethodPut, path, headers, bytes.NewReader(m), v2Auth, nil)
if err != nil {
return err
}
@@ -640,7 +640,7 @@ sigExists:
}
path := fmt.Sprintf(extensionsSignaturePath, reference.Path(d.ref.ref), manifestDigest.String())
- res, err := d.c.makeRequest(ctx, "PUT", path, nil, bytes.NewReader(body), v2Auth, nil)
+ res, err := d.c.makeRequest(ctx, http.MethodPut, path, nil, bytes.NewReader(body), v2Auth, nil)
if err != nil {
return err
}
diff --git a/vendor/github.com/containers/image/v5/docker/docker_image_src.go b/vendor/github.com/containers/image/v5/docker/docker_image_src.go
index c5a428ba0..5dc8e7b1f 100644
--- a/vendor/github.com/containers/image/v5/docker/docker_image_src.go
+++ b/vendor/github.com/containers/image/v5/docker/docker_image_src.go
@@ -192,7 +192,7 @@ func (s *dockerImageSource) fetchManifest(ctx context.Context, tagOrDigest strin
headers := map[string][]string{
"Accept": manifest.DefaultRequestedManifestMIMETypes,
}
- res, err := s.c.makeRequest(ctx, "GET", path, headers, nil, v2Auth, nil)
+ res, err := s.c.makeRequest(ctx, http.MethodGet, path, headers, nil, v2Auth, nil)
if err != nil {
return nil, "", err
}
@@ -248,7 +248,7 @@ func (s *dockerImageSource) getExternalBlob(ctx context.Context, urls []string)
// NOTE: we must not authenticate on additional URLs as those
// can be abused to leak credentials or tokens. Please
// refer to CVE-2020-15157 for more information.
- resp, err = s.c.makeRequestToResolvedURL(ctx, "GET", url, nil, nil, -1, noAuth, nil)
+ resp, err = s.c.makeRequestToResolvedURL(ctx, http.MethodGet, url, nil, nil, -1, noAuth, nil)
if err == nil {
if resp.StatusCode != http.StatusOK {
err = errors.Errorf("error fetching external blob from %q: %d (%s)", url, resp.StatusCode, http.StatusText(resp.StatusCode))
@@ -295,7 +295,7 @@ func (s *dockerImageSource) GetBlobAt(ctx context.Context, info types.BlobInfo,
path := fmt.Sprintf(blobsPath, reference.Path(s.physicalRef.ref), info.Digest.String())
logrus.Debugf("Downloading %s", path)
- res, err := s.c.makeRequest(ctx, "GET", path, headers, nil, v2Auth, nil)
+ res, err := s.c.makeRequest(ctx, http.MethodGet, path, headers, nil, v2Auth, nil)
if err != nil {
return nil, nil, err
}
@@ -364,7 +364,7 @@ func (s *dockerImageSource) GetBlob(ctx context.Context, info types.BlobInfo, ca
path := fmt.Sprintf(blobsPath, reference.Path(s.physicalRef.ref), info.Digest.String())
logrus.Debugf("Downloading %s", path)
- res, err := s.c.makeRequest(ctx, "GET", path, nil, nil, v2Auth, nil)
+ res, err := s.c.makeRequest(ctx, http.MethodGet, path, nil, nil, v2Auth, nil)
if err != nil {
return nil, 0, err
}
@@ -454,11 +454,10 @@ func (s *dockerImageSource) getOneSignature(ctx context.Context, url *url.URL) (
case "http", "https":
logrus.Debugf("GET %s", url)
- req, err := http.NewRequest("GET", url.String(), nil)
+ req, err := http.NewRequestWithContext(ctx, http.MethodGet, url.String(), nil)
if err != nil {
return nil, false, err
}
- req = req.WithContext(ctx)
res, err := s.c.client.Do(req)
if err != nil {
return nil, false, err
@@ -523,7 +522,7 @@ func deleteImage(ctx context.Context, sys *types.SystemContext, ref dockerRefere
return err
}
getPath := fmt.Sprintf(manifestPath, reference.Path(ref.ref), refTail)
- get, err := c.makeRequest(ctx, "GET", getPath, headers, nil, v2Auth, nil)
+ get, err := c.makeRequest(ctx, http.MethodGet, getPath, headers, nil, v2Auth, nil)
if err != nil {
return err
}
@@ -545,7 +544,7 @@ func deleteImage(ctx context.Context, sys *types.SystemContext, ref dockerRefere
// When retrieving the digest from a registry >= 2.3 use the following header:
// "Accept": "application/vnd.docker.distribution.manifest.v2+json"
- delete, err := c.makeRequest(ctx, "DELETE", deletePath, headers, nil, v2Auth, nil)
+ delete, err := c.makeRequest(ctx, http.MethodDelete, deletePath, headers, nil, v2Auth, nil)
if err != nil {
return err
}
diff --git a/vendor/github.com/containers/image/v5/internal/blobinfocache/blobinfocache.go b/vendor/github.com/containers/image/v5/internal/blobinfocache/blobinfocache.go
index 1dceaa669..b86e8b1ac 100644
--- a/vendor/github.com/containers/image/v5/internal/blobinfocache/blobinfocache.go
+++ b/vendor/github.com/containers/image/v5/internal/blobinfocache/blobinfocache.go
@@ -2,6 +2,7 @@ package blobinfocache
import (
"github.com/containers/image/v5/pkg/compression"
+ compressiontypes "github.com/containers/image/v5/pkg/compression/types"
"github.com/containers/image/v5/types"
digest "github.com/opencontainers/go-digest"
)
@@ -47,7 +48,7 @@ func CandidateLocationsFromV2(v2candidates []BICReplacementCandidate2) []types.B
// compression algorithm, or Uncompressed, or UnknownCompression. This is typically used by
// TryReusingBlob() implementations to set values in the BlobInfo structure that they return
// upon success.
-func OperationAndAlgorithmForCompressor(compressorName string) (types.LayerCompression, *compression.Algorithm, error) {
+func OperationAndAlgorithmForCompressor(compressorName string) (types.LayerCompression, *compressiontypes.Algorithm, error) {
switch compressorName {
case Uncompressed:
return types.Decompress, nil, nil
diff --git a/vendor/github.com/containers/image/v5/manifest/common.go b/vendor/github.com/containers/image/v5/manifest/common.go
index 5930640ac..4692211c0 100644
--- a/vendor/github.com/containers/image/v5/manifest/common.go
+++ b/vendor/github.com/containers/image/v5/manifest/common.go
@@ -3,7 +3,7 @@ package manifest
import (
"fmt"
- "github.com/containers/image/v5/pkg/compression"
+ compressiontypes "github.com/containers/image/v5/pkg/compression/types"
"github.com/containers/image/v5/types"
"github.com/sirupsen/logrus"
)
@@ -44,7 +44,7 @@ func layerInfosToStrings(infos []LayerInfo) []string {
// compressionMIMETypeSet describes a set of MIME type “variants” that represent differently-compressed
// versions of “the same kind of content”.
-// The map key is the return value of compression.Algorithm.Name(), or mtsUncompressed;
+// The map key is the return value of compressiontypes.Algorithm.Name(), or mtsUncompressed;
// the map value is a MIME type, or mtsUnsupportedMIMEType to mean "recognized but unsupported".
type compressionMIMETypeSet map[string]string
@@ -59,7 +59,7 @@ const mtsUnsupportedMIMEType = "" // A value in compressionMIMETypeSet that mean
// If the compression algorithm is unrecognized, or mimeType is not known to have variants that
// differ from it only in what type of compression has been applied, the returned error will not be
// a ManifestLayerCompressionIncompatibilityError.
-func compressionVariantMIMEType(variantTable []compressionMIMETypeSet, mimeType string, algorithm *compression.Algorithm) (string, error) {
+func compressionVariantMIMEType(variantTable []compressionMIMETypeSet, mimeType string, algorithm *compressiontypes.Algorithm) (string, error) {
if mimeType == mtsUnsupportedMIMEType { // Prevent matching against the {algo:mtsUnsupportedMIMEType} entries
return "", fmt.Errorf("cannot update unknown MIME type")
}
diff --git a/vendor/github.com/containers/image/v5/manifest/docker_schema2.go b/vendor/github.com/containers/image/v5/manifest/docker_schema2.go
index 584b5f09c..2711ca5eb 100644
--- a/vendor/github.com/containers/image/v5/manifest/docker_schema2.go
+++ b/vendor/github.com/containers/image/v5/manifest/docker_schema2.go
@@ -5,7 +5,7 @@ import (
"fmt"
"time"
- "github.com/containers/image/v5/pkg/compression"
+ compressiontypes "github.com/containers/image/v5/pkg/compression/types"
"github.com/containers/image/v5/pkg/strslice"
"github.com/containers/image/v5/types"
"github.com/opencontainers/go-digest"
@@ -214,14 +214,14 @@ func (m *Schema2) LayerInfos() []LayerInfo {
var schema2CompressionMIMETypeSets = []compressionMIMETypeSet{
{
- mtsUncompressed: DockerV2Schema2ForeignLayerMediaType,
- compression.Gzip.Name(): DockerV2Schema2ForeignLayerMediaTypeGzip,
- compression.Zstd.Name(): mtsUnsupportedMIMEType,
+ mtsUncompressed: DockerV2Schema2ForeignLayerMediaType,
+ compressiontypes.GzipAlgorithmName: DockerV2Schema2ForeignLayerMediaTypeGzip,
+ compressiontypes.ZstdAlgorithmName: mtsUnsupportedMIMEType,
},
{
- mtsUncompressed: DockerV2SchemaLayerMediaTypeUncompressed,
- compression.Gzip.Name(): DockerV2Schema2LayerMediaType,
- compression.Zstd.Name(): mtsUnsupportedMIMEType,
+ mtsUncompressed: DockerV2SchemaLayerMediaTypeUncompressed,
+ compressiontypes.GzipAlgorithmName: DockerV2Schema2LayerMediaType,
+ compressiontypes.ZstdAlgorithmName: mtsUnsupportedMIMEType,
},
}
diff --git a/vendor/github.com/containers/image/v5/manifest/oci.go b/vendor/github.com/containers/image/v5/manifest/oci.go
index 24ce6d080..29a479c94 100644
--- a/vendor/github.com/containers/image/v5/manifest/oci.go
+++ b/vendor/github.com/containers/image/v5/manifest/oci.go
@@ -5,7 +5,7 @@ import (
"fmt"
"strings"
- "github.com/containers/image/v5/pkg/compression"
+ compressiontypes "github.com/containers/image/v5/pkg/compression/types"
"github.com/containers/image/v5/types"
ociencspec "github.com/containers/ocicrypt/spec"
"github.com/opencontainers/go-digest"
@@ -96,14 +96,14 @@ func (m *OCI1) LayerInfos() []LayerInfo {
var oci1CompressionMIMETypeSets = []compressionMIMETypeSet{
{
- mtsUncompressed: imgspecv1.MediaTypeImageLayerNonDistributable,
- compression.Gzip.Name(): imgspecv1.MediaTypeImageLayerNonDistributableGzip,
- compression.Zstd.Name(): imgspecv1.MediaTypeImageLayerNonDistributableZstd,
+ mtsUncompressed: imgspecv1.MediaTypeImageLayerNonDistributable,
+ compressiontypes.GzipAlgorithmName: imgspecv1.MediaTypeImageLayerNonDistributableGzip,
+ compressiontypes.ZstdAlgorithmName: imgspecv1.MediaTypeImageLayerNonDistributableZstd,
},
{
- mtsUncompressed: imgspecv1.MediaTypeImageLayer,
- compression.Gzip.Name(): imgspecv1.MediaTypeImageLayerGzip,
- compression.Zstd.Name(): imgspecv1.MediaTypeImageLayerZstd,
+ mtsUncompressed: imgspecv1.MediaTypeImageLayer,
+ compressiontypes.GzipAlgorithmName: imgspecv1.MediaTypeImageLayerGzip,
+ compressiontypes.ZstdAlgorithmName: imgspecv1.MediaTypeImageLayerZstd,
},
}
diff --git a/vendor/github.com/containers/image/v5/oci/layout/oci_src.go b/vendor/github.com/containers/image/v5/oci/layout/oci_src.go
index 9925aeda7..55d3f637a 100644
--- a/vendor/github.com/containers/image/v5/oci/layout/oci_src.go
+++ b/vendor/github.com/containers/image/v5/oci/layout/oci_src.go
@@ -148,13 +148,13 @@ func (s *ociImageSource) getExternalBlob(ctx context.Context, urls []string) (io
errWrap := errors.New("failed fetching external blob from all urls")
for _, url := range urls {
- req, err := http.NewRequest("GET", url, nil)
+ req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil)
if err != nil {
errWrap = errors.Wrapf(errWrap, "fetching %s failed %s", url, err.Error())
continue
}
- resp, err := s.client.Do(req.WithContext(ctx))
+ resp, err := s.client.Do(req)
if err != nil {
errWrap = errors.Wrapf(errWrap, "fetching %s failed %s", url, err.Error())
continue
diff --git a/vendor/github.com/containers/image/v5/openshift/openshift.go b/vendor/github.com/containers/image/v5/openshift/openshift.go
index 889772fc0..6ea65bcf3 100644
--- a/vendor/github.com/containers/image/v5/openshift/openshift.go
+++ b/vendor/github.com/containers/image/v5/openshift/openshift.go
@@ -79,11 +79,10 @@ func (c *openshiftClient) doRequest(ctx context.Context, method, path string, re
logrus.Debugf("Will send body: %s", requestBody)
requestBodyReader = bytes.NewReader(requestBody)
}
- req, err := http.NewRequest(method, url.String(), requestBodyReader)
+ req, err := http.NewRequestWithContext(ctx, method, url.String(), requestBodyReader)
if err != nil {
return nil, err
}
- req = req.WithContext(ctx)
if len(c.bearerToken) != 0 {
req.Header.Set("Authorization", "Bearer "+c.bearerToken)
@@ -137,7 +136,7 @@ func (c *openshiftClient) doRequest(ctx context.Context, method, path string, re
func (c *openshiftClient) getImage(ctx context.Context, imageStreamImageName string) (*image, error) {
// FIXME: validate components per validation.IsValidPathSegmentName?
path := fmt.Sprintf("/oapi/v1/namespaces/%s/imagestreamimages/%s@%s", c.ref.namespace, c.ref.stream, imageStreamImageName)
- body, err := c.doRequest(ctx, "GET", path, nil)
+ body, err := c.doRequest(ctx, http.MethodGet, path, nil)
if err != nil {
return nil, err
}
@@ -273,7 +272,7 @@ func (s *openshiftImageSource) ensureImageIsResolved(ctx context.Context) error
// FIXME: validate components per validation.IsValidPathSegmentName?
path := fmt.Sprintf("/oapi/v1/namespaces/%s/imagestreams/%s", s.client.ref.namespace, s.client.ref.stream)
- body, err := s.client.doRequest(ctx, "GET", path, nil)
+ body, err := s.client.doRequest(ctx, http.MethodGet, path, nil)
if err != nil {
return err
}
@@ -496,7 +495,7 @@ sigExists:
if err != nil {
return err
}
- _, err = d.client.doRequest(ctx, "POST", "/oapi/v1/imagesignatures", body)
+ _, err = d.client.doRequest(ctx, http.MethodPost, "/oapi/v1/imagesignatures", body)
if err != nil {
return err
}
diff --git a/vendor/github.com/containers/image/v5/pkg/compression/compression.go b/vendor/github.com/containers/image/v5/pkg/compression/compression.go
index 718b50c05..c28e81792 100644
--- a/vendor/github.com/containers/image/v5/pkg/compression/compression.go
+++ b/vendor/github.com/containers/image/v5/pkg/compression/compression.go
@@ -9,7 +9,7 @@ import (
"github.com/containers/image/v5/pkg/compression/internal"
"github.com/containers/image/v5/pkg/compression/types"
- "github.com/containers/storage/pkg/chunked"
+ "github.com/containers/storage/pkg/chunked/compressor"
"github.com/klauspost/pgzip"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
@@ -21,15 +21,20 @@ type Algorithm = types.Algorithm
var (
// Gzip compression.
- Gzip = internal.NewAlgorithm("gzip", "gzip", []byte{0x1F, 0x8B, 0x08}, GzipDecompressor, gzipCompressor)
+ Gzip = internal.NewAlgorithm(types.GzipAlgorithmName, types.GzipAlgorithmName,
+ []byte{0x1F, 0x8B, 0x08}, GzipDecompressor, gzipCompressor)
// Bzip2 compression.
- Bzip2 = internal.NewAlgorithm("bzip2", "bzip2", []byte{0x42, 0x5A, 0x68}, Bzip2Decompressor, bzip2Compressor)
+ Bzip2 = internal.NewAlgorithm(types.Bzip2AlgorithmName, types.Bzip2AlgorithmName,
+ []byte{0x42, 0x5A, 0x68}, Bzip2Decompressor, bzip2Compressor)
// Xz compression.
- Xz = internal.NewAlgorithm("Xz", "xz", []byte{0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00}, XzDecompressor, xzCompressor)
+ Xz = internal.NewAlgorithm(types.XzAlgorithmName, types.XzAlgorithmName,
+ []byte{0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00}, XzDecompressor, xzCompressor)
// Zstd compression.
- Zstd = internal.NewAlgorithm("zstd", "zstd", []byte{0x28, 0xb5, 0x2f, 0xfd}, ZstdDecompressor, zstdCompressor)
+ Zstd = internal.NewAlgorithm(types.ZstdAlgorithmName, types.ZstdAlgorithmName,
+ []byte{0x28, 0xb5, 0x2f, 0xfd}, ZstdDecompressor, zstdCompressor)
// Zstd:chunked compression.
- ZstdChunked = internal.NewAlgorithm("zstd:chunked", "zstd", []byte{0x28, 0xb5, 0x2f, 0xfd}, ZstdDecompressor, chunked.ZstdCompressor)
+ ZstdChunked = internal.NewAlgorithm(types.ZstdChunkedAlgorithmName, types.ZstdAlgorithmName, /* Note: InternalUnstableUndocumentedMIMEQuestionMark is not ZstdChunkedAlgorithmName */
+ nil, ZstdDecompressor, compressor.ZstdCompressor)
compressionAlgorithms = map[string]Algorithm{
Gzip.Name(): Gzip,
@@ -118,7 +123,8 @@ func DetectCompressionFormat(input io.Reader) (Algorithm, DecompressorFunc, io.R
var retAlgo Algorithm
var decompressor DecompressorFunc
for _, algo := range compressionAlgorithms {
- if bytes.HasPrefix(buffer[:n], internal.AlgorithmPrefix(algo)) {
+ prefix := internal.AlgorithmPrefix(algo)
+ if len(prefix) > 0 && bytes.HasPrefix(buffer[:n], prefix) {
logrus.Debugf("Detected compression format %s", algo.Name())
retAlgo = algo
decompressor = internal.AlgorithmDecompressor(algo)
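
The new `len(prefix) > 0` guard matters because the `ZstdChunked` algorithm above is now registered with a `nil` prefix: `bytes.HasPrefix` treats an empty prefix as matching any input, so without the guard every stream would be detected as zstd:chunked. A small sketch of the behaviour the guard avoids:

```go
package main

import (
	"bytes"
	"fmt"
)

func main() {
	header := []byte{0x1F, 0x8B, 0x08} // a gzip magic number, clearly not zstd:chunked

	var prefix []byte // zstd:chunked registers no detection prefix
	fmt.Println(bytes.HasPrefix(header, prefix))                    // true: an empty prefix matches anything
	fmt.Println(len(prefix) > 0 && bytes.HasPrefix(header, prefix)) // false: skipped, as in the hunk above
}
```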
diff --git a/vendor/github.com/containers/image/v5/pkg/compression/internal/types.go b/vendor/github.com/containers/image/v5/pkg/compression/internal/types.go
index 5df5370b0..fb37ca317 100644
--- a/vendor/github.com/containers/image/v5/pkg/compression/internal/types.go
+++ b/vendor/github.com/containers/image/v5/pkg/compression/internal/types.go
@@ -14,7 +14,7 @@ type DecompressorFunc func(io.Reader) (io.ReadCloser, error)
type Algorithm struct {
name string
mime string
- prefix []byte
+ prefix []byte // Initial bytes of a stream compressed using this algorithm, or empty to disable detection.
decompressor DecompressorFunc
compressor CompressorFunc
}
diff --git a/vendor/github.com/containers/image/v5/pkg/compression/types/types.go b/vendor/github.com/containers/image/v5/pkg/compression/types/types.go
index f96eff2e3..43d03b601 100644
--- a/vendor/github.com/containers/image/v5/pkg/compression/types/types.go
+++ b/vendor/github.com/containers/image/v5/pkg/compression/types/types.go
@@ -11,3 +11,31 @@ type DecompressorFunc = internal.DecompressorFunc
// Algorithm is a compression algorithm provided and supported by pkg/compression.
// It can’t be supplied from the outside.
type Algorithm = internal.Algorithm
+
+const (
+ // GzipAlgorithmName is the name used by pkg/compression.Gzip.
+ // NOTE: Importing only this /types package does not inherently guarantee a Gzip algorithm
+ // will actually be available. (In fact it is intended for this types package not to depend
+ // on any of the implementations.)
+ GzipAlgorithmName = "gzip"
+ // Bzip2AlgorithmName is the name used by pkg/compression.Bzip2.
+ // NOTE: Importing only this /types package does not inherently guarantee a Bzip2 algorithm
+ // will actually be available. (In fact it is intended for this types package not to depend
+ // on any of the implementations.)
+ Bzip2AlgorithmName = "bzip2"
+ // XzAlgorithmName is the name used by pkg/compression.Xz.
+	// NOTE: Importing only this /types package does not inherently guarantee an Xz algorithm
+ // will actually be available. (In fact it is intended for this types package not to depend
+ // on any of the implementations.)
+ XzAlgorithmName = "Xz"
+ // ZstdAlgorithmName is the name used by pkg/compression.Zstd.
+ // NOTE: Importing only this /types package does not inherently guarantee a Zstd algorithm
+ // will actually be available. (In fact it is intended for this types package not to depend
+ // on any of the implementations.)
+ ZstdAlgorithmName = "zstd"
+ // ZstdChunkedAlgorithmName is the name used by pkg/compression.ZstdChunked.
+ // NOTE: Importing only this /types package does not inherently guarantee a ZstdChunked algorithm
+ // will actually be available. (In fact it is intended for this types package not to depend
+ // on any of the implementations.)
+ ZstdChunkedAlgorithmName = "zstd:chunked"
+)
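
These name constants let manifest and cache code refer to algorithms without importing the compressor implementations, which is how the `docker_schema2.go` and `oci.go` hunks above use them. A hedged sketch of that usage; the map contents are illustrative rather than copied from the library:

```go
package main

import (
	"fmt"

	compressiontypes "github.com/containers/image/v5/pkg/compression/types"
)

// layerMediaTypes is a toy lookup in the spirit of the compressionMIMETypeSet
// tables above; only the /types package is imported, no compressor code.
var layerMediaTypes = map[string]string{
	compressiontypes.GzipAlgorithmName: "application/vnd.oci.image.layer.v1.tar+gzip",
	compressiontypes.ZstdAlgorithmName: "application/vnd.oci.image.layer.v1.tar+zstd",
}

func main() {
	fmt.Println(layerMediaTypes[compressiontypes.ZstdAlgorithmName])
}
```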
diff --git a/vendor/github.com/containers/image/v5/pkg/docker/config/config.go b/vendor/github.com/containers/image/v5/pkg/docker/config/config.go
index 8436741f3..c82a9e1a0 100644
--- a/vendor/github.com/containers/image/v5/pkg/docker/config/config.go
+++ b/vendor/github.com/containers/image/v5/pkg/docker/config/config.go
@@ -236,9 +236,8 @@ func getAuthFilePaths(sys *types.SystemContext, homeDir string) []authPath {
// file or .docker/config.json, including support for OAuth2 and IdentityToken.
// If an entry is not found, an empty struct is returned.
//
-// Deprecated: GetCredentialsForRef should be used in favor of this API
-// because it allows different credentials for different repositories on the
-// same registry.
+// GetCredentialsForRef should almost always be used in favor of this API to
+// allow different credentials for different repositories on the same registry.
func GetCredentials(sys *types.SystemContext, registry string) (types.DockerAuthConfig, error) {
return getCredentialsWithHomeDir(sys, nil, registry, homedir.Get())
}
@@ -665,14 +664,11 @@ func findAuthentication(ref reference.Named, registry, path string, legacyFormat
// those entries even in non-legacyFormat ~/.docker/config.json.
// The docker.io registry still uses the /v1/ key with a special host name,
// so account for that as well.
- registry = normalizeAuthFileKey(registry, legacyFormat)
- normalizedAuths := map[string]dockerAuthConfig{}
+ registry = normalizeRegistry(registry)
for k, v := range auths.AuthConfigs {
- normalizedAuths[normalizeAuthFileKey(k, legacyFormat)] = v
- }
-
- if val, exists := normalizedAuths[registry]; exists {
- return decodeDockerAuth(val)
+ if normalizeAuthFileKey(k, legacyFormat) == registry {
+ return decodeDockerAuth(v)
+ }
}
return types.DockerAuthConfig{}, nil
diff --git a/vendor/github.com/containers/image/v5/types/types.go b/vendor/github.com/containers/image/v5/types/types.go
index 48efa195b..1c4a1419f 100644
--- a/vendor/github.com/containers/image/v5/types/types.go
+++ b/vendor/github.com/containers/image/v5/types/types.go
@@ -636,6 +636,8 @@ type SystemContext struct {
// === dir.Transport overrides ===
// DirForceCompress compresses the image layers if set to true
DirForceCompress bool
+ // DirForceDecompress decompresses the image layers if set to true
+ DirForceDecompress bool
// CompressionFormat is the format to use for the compression of the blobs
CompressionFormat *compression.Algorithm
diff --git a/vendor/github.com/containers/image/v5/version/version.go b/vendor/github.com/containers/image/v5/version/version.go
index 0a1971535..8936ec087 100644
--- a/vendor/github.com/containers/image/v5/version/version.go
+++ b/vendor/github.com/containers/image/v5/version/version.go
@@ -6,7 +6,7 @@ const (
// VersionMajor is for an API incompatible changes
VersionMajor = 5
// VersionMinor is for functionality in a backwards-compatible manner
- VersionMinor = 14
+ VersionMinor = 15
// VersionPatch is for backwards-compatible bug fixes
VersionPatch = 0
diff --git a/vendor/github.com/containers/storage/VERSION b/vendor/github.com/containers/storage/VERSION
index 7aa332e41..02261bead 100644
--- a/vendor/github.com/containers/storage/VERSION
+++ b/vendor/github.com/containers/storage/VERSION
@@ -1 +1 @@
-1.33.0
+1.33.1
diff --git a/vendor/github.com/containers/storage/drivers/quota/projectquota.go b/vendor/github.com/containers/storage/drivers/quota/projectquota.go
index a435f6b82..0609f970c 100644
--- a/vendor/github.com/containers/storage/drivers/quota/projectquota.go
+++ b/vendor/github.com/containers/storage/drivers/quota/projectquota.go
@@ -52,8 +52,11 @@ import "C"
import (
"fmt"
"io/ioutil"
+ "math"
+ "os"
"path"
"path/filepath"
+ "syscall"
"unsafe"
"github.com/containers/storage/pkg/directory"
@@ -61,6 +64,8 @@ import (
"golang.org/x/sys/unix"
)
+const projectIDsAllocatedPerQuotaHome = 10000
+
// Quota limit params - currently we only control blocks hard limit and inodes
type Quota struct {
Size uint64
@@ -75,23 +80,48 @@ type Control struct {
quotas map[string]uint32
}
+// Attempt to generate a unique project ID. Multiple directories
+// per file system can have quotas, and each needs a group of unique
+// IDs. This function attempts to allocate at least projectIDsAllocatedPerQuotaHome (10000)
+// unique project IDs, based on the inode of the basePath.
+func generateUniqueProjectID(path string) (uint32, error) {
+ fileinfo, err := os.Stat(path)
+ if err != nil {
+ return 0, err
+ }
+ stat, ok := fileinfo.Sys().(*syscall.Stat_t)
+ if !ok {
+ return 0, fmt.Errorf("Not a syscall.Stat_t %s", path)
+
+ }
+ projectID := projectIDsAllocatedPerQuotaHome + (stat.Ino*projectIDsAllocatedPerQuotaHome)%(math.MaxUint32-projectIDsAllocatedPerQuotaHome)
+ return uint32(projectID), nil
+}
+
// NewControl - initialize project quota support.
// Test to make sure that quota can be set on a test dir and find
// the first project id to be used for the next container create.
//
// Returns nil (and error) if project quota is not supported.
//
-// First get the project id of the home directory.
+// First get the project id of the basePath directory.
// This test will fail if the backing fs is not xfs.
//
// xfs_quota tool can be used to assign a project id to the driver home directory, e.g.:
-// echo 999:/var/lib/containers/storage/overlay >> /etc/projects
-// echo storage:999 >> /etc/projid
-// xfs_quota -x -c 'project -s storage' /<xfs mount point>
+// echo 100000:/var/lib/containers/storage/overlay >> /etc/projects
+// echo 200000:/var/lib/containers/storage/volumes >> /etc/projects
+// echo storage:100000 >> /etc/projid
+// echo volumes:200000 >> /etc/projid
+// xfs_quota -x -c 'project -s storage volumes' /<xfs mount point>
//
-// In that case, the home directory project id will be used as a "start offset"
-// and all containers will be assigned larger project ids (e.g. >= 1000).
-// This is a way to prevent xfs_quota management from conflicting with containers/storage.
+// In the example above, the storage directory project id will be used as a
+// "start offset" and all containers will be assigned larger project ids
+// (e.g. >= 100000). Then the volumes directory project id will be used as a
+// "start offset" and all volumes will be assigned larger project ids
+// (e.g. >= 200000).
+// This is a way to prevent xfs_quota management from conflicting with
+// containers/storage.
+//
//
// Then try to create a test directory with the next project id and set a quota
// on it. If that works, continue to scan existing containers to map allocated
@@ -105,8 +135,15 @@ func NewControl(basePath string) (*Control, error) {
if err != nil {
return nil, err
}
- minProjectID++
+ if minProjectID == 0 {
+		// Indicates the storage was never initialized;
+		// generate a unique range of project IDs for this basePath.
+ minProjectID, err = generateUniqueProjectID(basePath)
+ if err != nil {
+ return nil, err
+ }
+ }
//
// create backing filesystem device node
//
@@ -180,12 +217,12 @@ func setProjectQuota(backingFsBlockDev string, projectID uint32, quota Quota) er
d.d_flags = C.FS_PROJ_QUOTA
if quota.Size > 0 {
- d.d_fieldmask = C.FS_DQ_BHARD | C.FS_DQ_BSOFT
+ d.d_fieldmask = d.d_fieldmask | C.FS_DQ_BHARD | C.FS_DQ_BSOFT
d.d_blk_hardlimit = C.__u64(quota.Size / 512)
d.d_blk_softlimit = d.d_blk_hardlimit
}
if quota.Inodes > 0 {
- d.d_fieldmask = C.FS_DQ_IHARD | C.FS_DQ_ISOFT
+ d.d_fieldmask = d.d_fieldmask | C.FS_DQ_IHARD | C.FS_DQ_ISOFT
d.d_ino_hardlimit = C.__u64(quota.Inodes)
d.d_ino_softlimit = d.d_ino_hardlimit
}
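
Two behavioural points in this file are easy to miss: `d_fieldmask` is now OR-ed so block and inode limits can be applied in the same call, and `generateUniqueProjectID` spreads each quota home into its own block of `projectIDsAllocatedPerQuotaHome` IDs derived from the directory's inode. A quick worked example of that derivation (the inode numbers are made up):

```go
package main

import (
	"fmt"
	"math"
)

const projectIDsAllocatedPerQuotaHome = 10000

func main() {
	// Hypothetical inodes of two quota homes, e.g. .../storage/overlay and
	// .../storage/volumes; real values depend entirely on the filesystem.
	for _, ino := range []uint64{131, 262} {
		id := projectIDsAllocatedPerQuotaHome +
			(ino*projectIDsAllocatedPerQuotaHome)%(math.MaxUint32-projectIDsAllocatedPerQuotaHome)
		fmt.Printf("inode %d -> project IDs start at %d\n", ino, id)
	}
}
```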
diff --git a/vendor/github.com/dtylman/scp/.gitignore b/vendor/github.com/dtylman/scp/.gitignore
new file mode 100644
index 000000000..6e1690ed6
--- /dev/null
+++ b/vendor/github.com/dtylman/scp/.gitignore
@@ -0,0 +1,25 @@
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
+*.test
+*.prof
+example/example
diff --git a/vendor/github.com/dtylman/scp/LICENSE b/vendor/github.com/dtylman/scp/LICENSE
new file mode 100644
index 000000000..6565de59d
--- /dev/null
+++ b/vendor/github.com/dtylman/scp/LICENSE
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2016 Danny
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/dtylman/scp/README.md b/vendor/github.com/dtylman/scp/README.md
new file mode 100644
index 000000000..48cfefe02
--- /dev/null
+++ b/vendor/github.com/dtylman/scp/README.md
@@ -0,0 +1,42 @@
+# scp
+
+[![Go Report Card](https://goreportcard.com/badge/github.com/dtylman/scp)](https://goreportcard.com/report/github.com/dtylman/scp)
+
+A simple Go SCP client library.
+
+## Usage
+
+```go
+import (
+ "github.com/dtylman/scp"
+ "golang.org/x/crypto/ssh"
+)
+```
+
+## Sending Files
+
+Copies `/var/log/messages` to remote `/tmp/lala`:
+
+```go
+var sc *ssh.Client
+// establish ssh connection into sc here...
+n, err := scp.CopyTo(sc, "/var/log/messages", "/tmp/lala")
+if err == nil {
+	fmt.Printf("Sent %v bytes", n)
+}
+```
+
+## Receiving Files
+
+Copies remote `/var/log/message` to local `/tmp/lala`:
+
+```go
+var sc *ssh.Client
+// establish ssh connection into sc here...
+n, err := scp.CopyFrom(sc, "/var/log/message", "/tmp/lala")
+if err == nil {
+	fmt.Printf("Received %v bytes", n)
+}
+```
+
+
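
The README deliberately leaves the SSH setup as a comment. For completeness, a hedged sketch of how such a client is typically obtained with `golang.org/x/crypto/ssh`; host, user and password are placeholders, and `ssh.InsecureIgnoreHostKey` is used only to keep the example short, real code should verify host keys:

```go
package main

import (
	"fmt"

	"github.com/dtylman/scp"
	"golang.org/x/crypto/ssh"
)

func main() {
	cfg := &ssh.ClientConfig{
		User:            "user",                                   // placeholder
		Auth:            []ssh.AuthMethod{ssh.Password("secret")}, // placeholder
		HostKeyCallback: ssh.InsecureIgnoreHostKey(),              // example only; verify host keys in real code
	}
	sc, err := ssh.Dial("tcp", "host.example.com:22", cfg)
	if err != nil {
		panic(err)
	}
	defer sc.Close()

	n, err := scp.CopyTo(sc, "/var/log/messages", "/tmp/lala")
	if err == nil {
		fmt.Printf("Sent %v bytes\n", n)
	}
}
```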
diff --git a/vendor/github.com/dtylman/scp/msg.go b/vendor/github.com/dtylman/scp/msg.go
new file mode 100644
index 000000000..6dfc53535
--- /dev/null
+++ b/vendor/github.com/dtylman/scp/msg.go
@@ -0,0 +1,121 @@
+package scp
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "strconv"
+ "strings"
+)
+
+const (
+ //CopyMessage Copy Message Opcode
+ CopyMessage = 'C'
+ //ErrorMessage Error OpCode
+ ErrorMessage = 0x1
+ //WarnMessage Warning Opcode
+ WarnMessage = 0x2
+)
+
+//Message is scp control message
+type Message struct {
+ Type byte
+ Error error
+ Mode string
+ Size int64
+ FileName string
+}
+
+func (m *Message) readByte(reader io.Reader) (byte, error) {
+ buff := make([]byte, 1)
+ _, err := io.ReadFull(reader, buff)
+ if err != nil {
+ return 0, err
+ }
+ return buff[0], nil
+
+}
+
+func (m *Message) readOpCode(reader io.Reader) error {
+ var err error
+ m.Type, err = m.readByte(reader)
+ return err
+}
+
+//ReadError reads an error message
+func (m *Message) ReadError(reader io.Reader) error {
+ msg, err := ioutil.ReadAll(reader)
+ if err != nil {
+ return err
+ }
+ m.Error = errors.New(strings.TrimSpace(string(msg)))
+ return nil
+}
+
+func (m *Message) readLine(reader io.Reader) (string, error) {
+ line := ""
+ b, err := m.readByte(reader)
+ if err != nil {
+ return "", err
+ }
+ for b != 10 {
+ line += string(b)
+ b, err = m.readByte(reader)
+ if err != nil {
+ return "", err
+ }
+ }
+ return line, nil
+}
+
+func (m *Message) readCopy(reader io.Reader) error {
+ line, err := m.readLine(reader)
+ if err != nil {
+ return err
+ }
+ parts := strings.Split(line, " ")
+	if len(parts) < 3 {
+ return errors.New("Invalid copy line: " + line)
+ }
+ m.Mode = parts[0]
+ m.Size, err = strconv.ParseInt(parts[1], 10, 0)
+ if err != nil {
+ return err
+ }
+ m.FileName = parts[2]
+ return nil
+}
+
+//ReadFrom reads message from reader
+func (m *Message) ReadFrom(reader io.Reader) (int64, error) {
+ err := m.readOpCode(reader)
+ if err != nil {
+ return 0, err
+ }
+ switch m.Type {
+ case CopyMessage:
+ err = m.readCopy(reader)
+ if err != nil {
+ return 0, err
+ }
+ case ErrorMessage, WarnMessage:
+ err = m.ReadError(reader)
+ if err != nil {
+ return 0, err
+ }
+ default:
+ return 0, fmt.Errorf("Unsupported opcode: %v", m.Type)
+ }
+ return m.Size, nil
+}
+
+//NewMessageFromReader constructs a new message from a data in reader
+func NewMessageFromReader(reader io.Reader) (*Message, error) {
+ m := new(Message)
+ _, err := m.ReadFrom(reader)
+ if err != nil {
+ return nil, err
+ }
+ return m, nil
+}
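
A small sketch of what `NewMessageFromReader` parses: a single-character opcode followed by mode, size and file name on one newline-terminated line, as produced by `scp -f` (the wire string here is hand-written for illustration):

```go
package main

import (
	"fmt"
	"strings"

	"github.com/dtylman/scp"
)

func main() {
	// A copy control message: opcode 'C', then "<mode> <size> <name>\n".
	// The file payload that would follow on the wire is omitted here.
	wire := "C0644 11 hello.txt\n"

	msg, err := scp.NewMessageFromReader(strings.NewReader(wire))
	if err != nil {
		panic(err)
	}
	fmt.Println(msg.Mode, msg.Size, msg.FileName) // 0644 11 hello.txt
}
```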
diff --git a/vendor/github.com/dtylman/scp/scp.go b/vendor/github.com/dtylman/scp/scp.go
new file mode 100644
index 000000000..841c16965
--- /dev/null
+++ b/vendor/github.com/dtylman/scp/scp.go
@@ -0,0 +1,153 @@
+package scp
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "io"
+ "os"
+ "path/filepath"
+
+ log "github.com/sirupsen/logrus"
+ "golang.org/x/crypto/ssh"
+)
+
+const (
+ fileMode = "0644"
+ buffSize = 1024 * 256
+)
+
+//CopyTo copy from local to remote
+func CopyTo(sshClient *ssh.Client, local string, remote string) (int64, error) {
+ session, err := sshClient.NewSession()
+ if err != nil {
+ return 0, err
+ }
+ defer session.Close()
+ stderr := &bytes.Buffer{}
+ session.Stderr = stderr
+ stdout := &bytes.Buffer{}
+ session.Stdout = stdout
+ writer, err := session.StdinPipe()
+ if err != nil {
+ return 0, err
+ }
+ defer writer.Close()
+ err = session.Start("scp -t " + filepath.Dir(remote))
+ if err != nil {
+ return 0, err
+ }
+
+ localFile, err := os.Open(local)
+ if err != nil {
+ return 0, err
+ }
+ fileInfo, err := localFile.Stat()
+ if err != nil {
+ return 0, err
+ }
+ _, err = fmt.Fprintf(writer, "C%s %d %s\n", fileMode, fileInfo.Size(), filepath.Base(remote))
+ if err != nil {
+ return 0, err
+ }
+ n, err := copyN(writer, localFile, fileInfo.Size())
+ if err != nil {
+ return 0, err
+ }
+ err = ack(writer)
+ if err != nil {
+ return 0, err
+ }
+
+ err = session.Wait()
+ log.Debugf("Copied %v bytes out of %v. err: %v stdout:%v. stderr:%v", n, fileInfo.Size(), err, stdout, stderr)
+	//NOTE: "Process exited with status 1" is not an error; it is just how scp works (it waits for the next control message and we send EOF).
+ return n, nil
+}
+
+//CopyFrom copy from remote to local
+func CopyFrom(sshClient *ssh.Client, remote string, local string) (int64, error) {
+ session, err := sshClient.NewSession()
+ if err != nil {
+ return 0, err
+ }
+ defer session.Close()
+ stderr := &bytes.Buffer{}
+ session.Stderr = stderr
+ writer, err := session.StdinPipe()
+ if err != nil {
+ return 0, err
+ }
+ defer writer.Close()
+ reader, err := session.StdoutPipe()
+ if err != nil {
+ return 0, err
+ }
+ err = session.Start("scp -f " + remote)
+ if err != nil {
+ return 0, err
+ }
+ err = ack(writer)
+ if err != nil {
+ return 0, err
+ }
+ msg, err := NewMessageFromReader(reader)
+ if err != nil {
+ return 0, err
+ }
+ if msg.Type == ErrorMessage || msg.Type == WarnMessage {
+ return 0, msg.Error
+ }
+ log.Debugf("Receiving %v", msg)
+
+ err = ack(writer)
+ if err != nil {
+ return 0, err
+ }
+ outFile, err := os.Create(local)
+ if err != nil {
+ return 0, err
+ }
+ defer outFile.Close()
+ n, err := copyN(outFile, reader, msg.Size)
+ if err != nil {
+ return 0, err
+ }
+ err = outFile.Sync()
+ if err != nil {
+ return 0, err
+ }
+ err = outFile.Close()
+ if err != nil {
+ return 0, err
+ }
+ err = session.Wait()
+ log.Debugf("Copied %v bytes out of %v. err: %v stderr:%v", n, msg.Size, err, stderr)
+ return n, nil
+}
+
+func ack(writer io.Writer) error {
+ var msg = []byte{0, 0, 10, 13}
+ n, err := writer.Write(msg)
+ if err != nil {
+ return err
+ }
+ if n < len(msg) {
+ return errors.New("Failed to write ack buffer")
+ }
+ return nil
+}
+
+func copyN(writer io.Writer, src io.Reader, size int64) (int64, error) {
+ reader := io.LimitReader(src, size)
+ var total int64
+ for total < size {
+ n, err := io.CopyBuffer(writer, reader, make([]byte, buffSize))
+ log.Debugf("Copied chunk %v total: %v out of %v err: %v ", n, total, size, err)
+ if err != nil {
+ return 0, err
+ }
+ total += n
+ }
+ return total, nil
+}