summaryrefslogtreecommitdiff
path: root/vendor/github.com
diff options
context:
space:
mode:
Diffstat (limited to 'vendor/github.com')
-rw-r--r--vendor/github.com/containers/buildah/.cirrus.yml85
-rw-r--r--vendor/github.com/containers/buildah/Makefile9
-rw-r--r--vendor/github.com/containers/buildah/add.go8
-rw-r--r--vendor/github.com/containers/buildah/buildah.go3
-rw-r--r--vendor/github.com/containers/buildah/changelog.txt31
-rw-r--r--vendor/github.com/containers/buildah/commit.go176
-rw-r--r--vendor/github.com/containers/buildah/copier/copier.go140
-rw-r--r--vendor/github.com/containers/buildah/copier/syscall_unix.go16
-rw-r--r--vendor/github.com/containers/buildah/copier/syscall_windows.go5
-rw-r--r--vendor/github.com/containers/buildah/define/build.go2
-rw-r--r--vendor/github.com/containers/buildah/define/types.go2
-rw-r--r--vendor/github.com/containers/buildah/go.mod19
-rw-r--r--vendor/github.com/containers/buildah/go.sum118
-rw-r--r--vendor/github.com/containers/buildah/image.go2
-rw-r--r--vendor/github.com/containers/buildah/imagebuildah/build.go13
-rw-r--r--vendor/github.com/containers/buildah/imagebuildah/chroot_symlink_linux.go151
-rw-r--r--vendor/github.com/containers/buildah/imagebuildah/chroot_symlink_unsupported.go13
-rw-r--r--vendor/github.com/containers/buildah/imagebuildah/executor.go68
-rw-r--r--vendor/github.com/containers/buildah/imagebuildah/stage_executor.go91
-rw-r--r--vendor/github.com/containers/buildah/install.md78
-rw-r--r--vendor/github.com/containers/buildah/new.go250
-rw-r--r--vendor/github.com/containers/buildah/pkg/blobcache/blobcache.go26
-rw-r--r--vendor/github.com/containers/buildah/pkg/cli/common.go12
-rw-r--r--vendor/github.com/containers/buildah/pkg/overlay/overlay.go10
-rw-r--r--vendor/github.com/containers/buildah/pkg/parse/parse.go57
-rw-r--r--vendor/github.com/containers/buildah/pull.go270
-rw-r--r--vendor/github.com/containers/buildah/push.go126
-rw-r--r--vendor/github.com/containers/buildah/run.go5
-rw-r--r--vendor/github.com/containers/buildah/run_linux.go237
-rw-r--r--vendor/github.com/containers/buildah/util/util.go85
-rw-r--r--vendor/github.com/containers/common/libimage/copier.go427
-rw-r--r--vendor/github.com/containers/common/libimage/disk_usage.go126
-rw-r--r--vendor/github.com/containers/common/libimage/download.go46
-rw-r--r--vendor/github.com/containers/common/libimage/events.go43
-rw-r--r--vendor/github.com/containers/common/libimage/filters.go228
-rw-r--r--vendor/github.com/containers/common/libimage/history.go80
-rw-r--r--vendor/github.com/containers/common/libimage/image.go802
-rw-r--r--vendor/github.com/containers/common/libimage/image_config.go242
-rw-r--r--vendor/github.com/containers/common/libimage/image_tree.go96
-rw-r--r--vendor/github.com/containers/common/libimage/import.go108
-rw-r--r--vendor/github.com/containers/common/libimage/inspect.go206
-rw-r--r--vendor/github.com/containers/common/libimage/layer_tree.go249
-rw-r--r--vendor/github.com/containers/common/libimage/load.go125
-rw-r--r--vendor/github.com/containers/common/libimage/manifest_list.go389
-rw-r--r--vendor/github.com/containers/common/libimage/manifests/copy.go (renamed from vendor/github.com/containers/buildah/manifests/copy.go)0
-rw-r--r--vendor/github.com/containers/common/libimage/manifests/manifests.go (renamed from vendor/github.com/containers/buildah/manifests/manifests.go)4
-rw-r--r--vendor/github.com/containers/common/libimage/normalize.go92
-rw-r--r--vendor/github.com/containers/common/libimage/oci.go97
-rw-r--r--vendor/github.com/containers/common/libimage/pull.go458
-rw-r--r--vendor/github.com/containers/common/libimage/push.go83
-rw-r--r--vendor/github.com/containers/common/libimage/runtime.go573
-rw-r--r--vendor/github.com/containers/common/libimage/save.go202
-rw-r--r--vendor/github.com/containers/common/libimage/search.go307
-rw-r--r--vendor/github.com/containers/common/pkg/config/config.go29
-rw-r--r--vendor/github.com/containers/common/pkg/config/pull_policy.go95
-rw-r--r--vendor/github.com/containers/common/pkg/filters/filters.go118
-rw-r--r--vendor/github.com/containers/common/pkg/manifests/errors.go (renamed from vendor/github.com/containers/buildah/pkg/manifests/errors.go)0
-rw-r--r--vendor/github.com/containers/common/pkg/manifests/manifests.go (renamed from vendor/github.com/containers/buildah/pkg/manifests/manifests.go)0
-rw-r--r--vendor/github.com/containers/common/pkg/signal/signal_common.go41
-rw-r--r--vendor/github.com/containers/common/pkg/signal/signal_linux.go108
-rw-r--r--vendor/github.com/containers/common/pkg/signal/signal_linux_mipsx.go108
-rw-r--r--vendor/github.com/containers/common/pkg/signal/signal_unsupported.go99
-rw-r--r--vendor/github.com/containers/common/pkg/supplemented/errors.go (renamed from vendor/github.com/containers/buildah/pkg/supplemented/errors.go)2
-rw-r--r--vendor/github.com/containers/common/pkg/supplemented/supplemented.go (renamed from vendor/github.com/containers/buildah/pkg/supplemented/supplemented.go)0
-rw-r--r--vendor/github.com/containers/common/pkg/timetype/timestamp.go131
-rw-r--r--vendor/github.com/containers/common/version/version.go2
-rw-r--r--vendor/github.com/containers/image/v5/docker/tarfile/dest.go119
-rw-r--r--vendor/github.com/containers/image/v5/docker/tarfile/doc.go3
-rw-r--r--vendor/github.com/containers/image/v5/docker/tarfile/src.go104
-rw-r--r--vendor/github.com/containers/image/v5/docker/tarfile/types.go8
-rw-r--r--vendor/github.com/disiqueira/gotree/v3/.gitignore137
-rw-r--r--vendor/github.com/disiqueira/gotree/v3/.travis.yml11
-rw-r--r--vendor/github.com/disiqueira/gotree/v3/LICENSE21
-rw-r--r--vendor/github.com/disiqueira/gotree/v3/README.md104
-rw-r--r--vendor/github.com/disiqueira/gotree/v3/_config.yml1
-rw-r--r--vendor/github.com/disiqueira/gotree/v3/go.mod3
-rw-r--r--vendor/github.com/disiqueira/gotree/v3/gotree-logo.pngbin0 -> 24183 bytes
-rw-r--r--vendor/github.com/disiqueira/gotree/v3/gotree.go129
-rw-r--r--vendor/github.com/ishidawataru/sctp/.travis.yml11
-rw-r--r--vendor/github.com/ishidawataru/sctp/sctp_linux.go2
-rw-r--r--vendor/github.com/jinzhu/copier/License20
-rw-r--r--vendor/github.com/jinzhu/copier/README.md131
-rw-r--r--vendor/github.com/jinzhu/copier/copier.go491
-rw-r--r--vendor/github.com/jinzhu/copier/errors.go10
-rw-r--r--vendor/github.com/jinzhu/copier/go.mod3
-rw-r--r--vendor/github.com/openshift/imagebuilder/README.md4
-rw-r--r--vendor/github.com/openshift/imagebuilder/builder.go4
-rw-r--r--vendor/github.com/openshift/imagebuilder/dispatchers.go21
-rw-r--r--vendor/github.com/openshift/imagebuilder/imagebuilder.spec2
89 files changed, 7740 insertions, 1423 deletions
diff --git a/vendor/github.com/containers/buildah/.cirrus.yml b/vendor/github.com/containers/buildah/.cirrus.yml
index 32c711be8..e62c14863 100644
--- a/vendor/github.com/containers/buildah/.cirrus.yml
+++ b/vendor/github.com/containers/buildah/.cirrus.yml
@@ -26,12 +26,12 @@ env:
# GCE project where images live
IMAGE_PROJECT: "libpod-218412"
# See https://github.com/containers/podman/blob/master/contrib/cirrus/README.md#test_build_cache_images_task-task
- FEDORA_NAME: "fedora-33"
- PRIOR_FEDORA_NAME: "fedora-32"
- UBUNTU_NAME: "ubuntu-2010"
- PRIOR_UBUNTU_NAME: "ubuntu-2004"
+ FEDORA_NAME: "fedora-34"
+ PRIOR_FEDORA_NAME: "fedora-33"
+ UBUNTU_NAME: "ubuntu-2104"
+ PRIOR_UBUNTU_NAME: "ubuntu-2010"
- IMAGE_SUFFIX: "c6102133168668672"
+ IMAGE_SUFFIX: "c6032583541653504"
FEDORA_CACHE_IMAGE_NAME: "fedora-${IMAGE_SUFFIX}"
PRIOR_FEDORA_CACHE_IMAGE_NAME: "prior-fedora-${IMAGE_SUFFIX}"
UBUNTU_CACHE_IMAGE_NAME: "ubuntu-${IMAGE_SUFFIX}"
@@ -133,14 +133,20 @@ vendor_task:
unit_task:
- name: "Unit tests"
+ name: 'Unit tests w/ $STORAGE_DRIVER'
alias: unit
depends_on:
- smoke
- vendor
- timeout_in: 45m
+ timeout_in: 50m
+
+ matrix:
+ - env:
+ STORAGE_DRIVER: 'vfs'
+ - env:
+ STORAGE_DRIVER: 'overlay'
setup_script: '${SCRIPT_BASE}/setup.sh |& ${_TIMESTAMP}'
build_script: '${SCRIPT_BASE}/build.sh |& ${_TIMESTAMP}'
@@ -149,13 +155,9 @@ unit_task:
binary_artifacts:
path: ./bin/*
- env:
- matrix:
- STORAGE_DRIVER: 'vfs'
- STORAGE_DRIVER: 'overlay'
conformance_task:
- name: "Docker Build Conformance"
+ name: 'Build Conformance w/ $STORAGE_DRIVER'
alias: conformance
depends_on:
@@ -166,13 +168,15 @@ conformance_task:
timeout_in: 25m
+ matrix:
+ - env:
+ STORAGE_DRIVER: 'vfs'
+ - env:
+ STORAGE_DRIVER: 'overlay'
+
setup_script: '${SCRIPT_BASE}/setup.sh |& ${_TIMESTAMP}'
conformance_test_script: '${SCRIPT_BASE}/test.sh conformance |& ${_TIMESTAMP}'
- env:
- matrix:
- STORAGE_DRIVER: 'vfs'
- STORAGE_DRIVER: 'overlay'
# Confirm cross-compile ALL archetectures on a Mac OS-X VM.
cross_build_task:
@@ -208,6 +212,9 @@ static_build_task:
memory: 12
disk: 200
+ env:
+ NIX_FQIN: "docker.io/nixos/nix:latest"
+
init_script: |
set -ex
setenforce 0
@@ -223,8 +230,16 @@ static_build_task:
set -ex
mkdir -p .cache
mv .cache /nix
- if [[ -z $(ls -A /nix) ]]; then podman run --rm --privileged -ti -v /:/mnt nixos/nix cp -rfT /nix /mnt/nix; fi
- podman run --rm --privileged -ti -v /nix:/nix -v ${PWD}:${PWD} -w ${PWD} nixos/nix nix --print-build-logs --option cores 8 --option max-jobs 8 build --file nix/
+ if [[ -z $(ls -A /nix) ]]; then
+ podman run --rm --privileged -i -v /:/mnt \
+ $NIX_FQIN \
+ cp -rfT /nix /mnt/nix
+ fi
+ podman run --rm --privileged -i -v /nix:/nix \
+ -v ${PWD}:${PWD} -w ${PWD} \
+ $NIX_FQIN \
+ nix --print-build-logs --option cores 8 \
+ --option max-jobs 8 build --file nix/
binaries_artifacts:
path: "result/bin/buildah"
@@ -235,25 +250,47 @@ static_build_task:
integration_task:
- name: "Integration $DISTRO_NV"
+ name: "Integration $DISTRO_NV w/ $STORAGE_DRIVER"
alias: integration
depends_on:
- unit
matrix:
+ # VFS
+ - env:
+ DISTRO_NV: "${FEDORA_NAME}"
+ IMAGE_NAME: "${FEDORA_CACHE_IMAGE_NAME}"
+ STORAGE_DRIVER: 'vfs'
+ - env:
+ DISTRO_NV: "${PRIOR_FEDORA_NAME}"
+ IMAGE_NAME: "${PRIOR_FEDORA_CACHE_IMAGE_NAME}"
+ STORAGE_DRIVER: 'vfs'
+ - env:
+ DISTRO_NV: "${UBUNTU_NAME}"
+ IMAGE_NAME: "${UBUNTU_CACHE_IMAGE_NAME}"
+ STORAGE_DRIVER: 'vfs'
+ - env:
+ DISTRO_NV: "${PRIOR_UBUNTU_NAME}"
+ IMAGE_NAME: "${PRIOR_UBUNTU_CACHE_IMAGE_NAME}"
+ STORAGE_DRIVER: 'vfs'
+ # OVERLAY
- env:
DISTRO_NV: "${FEDORA_NAME}"
IMAGE_NAME: "${FEDORA_CACHE_IMAGE_NAME}"
- # - env:
- # DISTRO_NV: "${PRIOR_FEDORA_NAME}"
- # IMAGE_NAME: "${PRIOR_FEDORA_CACHE_IMAGE_NAME}"
+ STORAGE_DRIVER: 'overlay'
+ - env:
+ DISTRO_NV: "${PRIOR_FEDORA_NAME}"
+ IMAGE_NAME: "${PRIOR_FEDORA_CACHE_IMAGE_NAME}"
+ STORAGE_DRIVER: 'overlay'
- env:
DISTRO_NV: "${UBUNTU_NAME}"
IMAGE_NAME: "${UBUNTU_CACHE_IMAGE_NAME}"
+ STORAGE_DRIVER: 'overlay'
- env:
DISTRO_NV: "${PRIOR_UBUNTU_NAME}"
IMAGE_NAME: "${PRIOR_UBUNTU_CACHE_IMAGE_NAME}"
+ STORAGE_DRIVER: 'overlay'
gce_instance:
image_name: "$IMAGE_NAME"
@@ -276,10 +313,6 @@ integration_task:
package_versions_script: '$GOSRC/$SCRIPT_BASE/logcollector.sh packages'
golang_version_script: '$GOSRC/$SCRIPT_BASE/logcollector.sh golang'
- env:
- matrix:
- STORAGE_DRIVER: 'vfs'
- STORAGE_DRIVER: 'overlay'
in_podman_task:
name: "Containerized Integration"
diff --git a/vendor/github.com/containers/buildah/Makefile b/vendor/github.com/containers/buildah/Makefile
index 9ff59df55..2a54d73c1 100644
--- a/vendor/github.com/containers/buildah/Makefile
+++ b/vendor/github.com/containers/buildah/Makefile
@@ -51,8 +51,11 @@ all: bin/buildah bin/imgtype docs
# Update nix/nixpkgs.json its latest stable commit
.PHONY: nixpkgs
nixpkgs:
- @nix run -f channel:nixos-20.09 nix-prefetch-git -c nix-prefetch-git \
- --no-deepClone https://github.com/nixos/nixpkgs > nix/nixpkgs.json
+ @nix run \
+ -f channel:nixos-20.09 nix-prefetch-git \
+ -c nix-prefetch-git \
+ --no-deepClone \
+ https://github.com/nixos/nixpkgs refs/heads/nixos-20.09 > nix/nixpkgs.json
# Build statically linked binary
.PHONY: static
@@ -161,7 +164,7 @@ tests/testreport/testreport: tests/testreport/testreport.go
.PHONY: test-unit
test-unit: tests/testreport/testreport
- $(GO_TEST) -v -tags "$(STORAGETAGS) $(SECURITYTAGS)" -cover -race $(shell $(GO) list ./... | grep -v vendor | grep -v tests | grep -v cmd) -timeout 40m
+ $(GO_TEST) -v -tags "$(STORAGETAGS) $(SECURITYTAGS)" -cover -race $(shell $(GO) list ./... | grep -v vendor | grep -v tests | grep -v cmd) -timeout 45m
tmp=$(shell mktemp -d) ; \
mkdir -p $$tmp/root $$tmp/runroot; \
$(GO_TEST) -v -tags "$(STORAGETAGS) $(SECURITYTAGS)" -cover -race ./cmd/buildah -args --root $$tmp/root --runroot $$tmp/runroot --storage-driver vfs --signature-policy $(shell pwd)/tests/policy.json --registries-conf $(shell pwd)/tests/registries.conf
diff --git a/vendor/github.com/containers/buildah/add.go b/vendor/github.com/containers/buildah/add.go
index e81e35c30..0a77e9f9d 100644
--- a/vendor/github.com/containers/buildah/add.go
+++ b/vendor/github.com/containers/buildah/add.go
@@ -224,7 +224,7 @@ func (b *Builder) Add(destination string, extract bool, options AddAndCopyOption
}
localSourceStats, err = copier.Stat(contextDir, contextDir, statOptions, localSources)
if err != nil {
- return errors.Wrapf(err, "error checking on sources %v under %q", localSources, contextDir)
+ return errors.Wrapf(err, "checking on sources under %q", contextDir)
}
}
numLocalSourceItems := 0
@@ -238,10 +238,10 @@ func (b *Builder) Add(destination string, extract bool, options AddAndCopyOption
if strings.HasPrefix(rel, ".."+string(os.PathSeparator)) {
errorText = fmt.Sprintf("possible escaping context directory error: %s", errorText)
}
- return errors.Errorf("error checking on source %v under %q: %v", localSourceStat.Glob, contextDir, errorText)
+ return errors.Errorf("checking on sources under %q: %v", contextDir, errorText)
}
if len(localSourceStat.Globbed) == 0 {
- return errors.Wrapf(syscall.ENOENT, "error checking on source %v under %q: no glob matches", localSourceStat.Glob, contextDir)
+ return errors.Wrapf(syscall.ENOENT, "checking source under %q: no glob matches", contextDir)
}
numLocalSourceItems += len(localSourceStat.Globbed)
}
@@ -433,7 +433,7 @@ func (b *Builder) Add(destination string, extract bool, options AddAndCopyOption
}
}
if localSourceStat == nil {
- return errors.Errorf("internal error: should have statted %s, but we didn't?", src)
+ continue
}
// Iterate through every item that matched the glob.
diff --git a/vendor/github.com/containers/buildah/buildah.go b/vendor/github.com/containers/buildah/buildah.go
index b0ddd0f72..771165d43 100644
--- a/vendor/github.com/containers/buildah/buildah.go
+++ b/vendor/github.com/containers/buildah/buildah.go
@@ -357,6 +357,9 @@ type ImportFromImageOptions struct {
// NewBuilder creates a new build container.
func NewBuilder(ctx context.Context, store storage.Store, options BuilderOptions) (*Builder, error) {
+ if options.CommonBuildOpts == nil {
+ options.CommonBuildOpts = &CommonBuildOptions{}
+ }
return newBuilder(ctx, store, options)
}
diff --git a/vendor/github.com/containers/buildah/changelog.txt b/vendor/github.com/containers/buildah/changelog.txt
index 74929da78..7d31c854d 100644
--- a/vendor/github.com/containers/buildah/changelog.txt
+++ b/vendor/github.com/containers/buildah/changelog.txt
@@ -1,3 +1,34 @@
+- Changelog for v1.20.1 (2021-04-13)
+ * Run container with isolation type set at 'from'
+ * bats helpers.bash - minor refactoring
+ * Bump containers/storage vendor to v1.29.0
+ * build(deps): bump github.com/onsi/ginkgo from 1.16.0 to 1.16.1
+ * Cirrus: Update VMs w/ F34beta
+ * CLI add/copy: add a --from option
+ * build(deps): bump github.com/onsi/ginkgo from 1.15.2 to 1.16.0
+ * Add authentication system tests for 'commit' and 'bud'
+ * fix local image lookup for custom platform
+ * Double-check existence of OCI runtimes
+ * Cirrus: Make use of shared get_ci_vm container
+ * Add system tests of "buildah run"
+ * Update nix pin with `make nixpkgs`
+ * Remove some stuttering on returns errors
+ * Setup alias for --tty to --terminal
+ * Add conformance tests for COPY /...
+ * Put a few more minutes on the clock for the CI conformance test
+ * Add a conformance test for COPY --from $symlink
+ * Add conformance tests for COPY ""
+ * Check for symlink in builtin volume
+ * Sort all mounts by destination directory
+ * System-test cleanup
+ * Export parse.Platform string to be used by podman-remote
+ * blobcache: fix sequencing error
+ * build(deps): bump github.com/containers/common from 0.35.3 to 0.35.4
+ * Fix URL in demos/buildah_multi_stage.sh
+ * Add a few system tests
+ * [NO TESTS NEEDED] Use --recurse-modules when building git context
+ * Bump to v1.20.1-dev
+
- Changelog for v1.20.0 (2021-03-25)
* vendor in containers/storage v1.28.1
* build(deps): bump github.com/containers/common from 0.35.2 to 0.35.3
diff --git a/vendor/github.com/containers/buildah/commit.go b/vendor/github.com/containers/buildah/commit.go
index f588c8043..139355517 100644
--- a/vendor/github.com/containers/buildah/commit.go
+++ b/vendor/github.com/containers/buildah/commit.go
@@ -3,16 +3,15 @@ package buildah
import (
"context"
"encoding/json"
- "fmt"
"io"
"io/ioutil"
"os"
"strings"
"time"
- "github.com/containers/buildah/manifests"
"github.com/containers/buildah/pkg/blobcache"
"github.com/containers/buildah/util"
+ "github.com/containers/common/libimage/manifests"
"github.com/containers/image/v5/docker"
"github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/manifest"
@@ -104,59 +103,6 @@ type CommitOptions struct {
OciEncryptLayers *[]int
}
-// PushOptions can be used to alter how an image is copied somewhere.
-type PushOptions struct {
- // Compression specifies the type of compression which is applied to
- // layer blobs. The default is to not use compression, but
- // archive.Gzip is recommended.
- Compression archive.Compression
- // SignaturePolicyPath specifies an override location for the signature
- // policy which should be used for verifying the new image as it is
- // being written. Except in specific circumstances, no value should be
- // specified, indicating that the shared, system-wide default policy
- // should be used.
- SignaturePolicyPath string
- // ReportWriter is an io.Writer which will be used to log the writing
- // of the new image.
- ReportWriter io.Writer
- // Store is the local storage store which holds the source image.
- Store storage.Store
- // github.com/containers/image/types SystemContext to hold credentials
- // and other authentication/authorization information.
- SystemContext *types.SystemContext
- // ManifestType is the format to use when saving the image using the 'dir' transport
- // possible options are oci, v2s1, and v2s2
- ManifestType string
- // BlobDirectory is the name of a directory in which we'll look for
- // prebuilt copies of layer blobs that we might otherwise need to
- // regenerate from on-disk layers, substituting them in the list of
- // blobs to copy whenever possible.
- BlobDirectory string
- // Quiet is a boolean value that determines if minimal output to
- // the user will be displayed, this is best used for logging.
- // The default is false.
- Quiet bool
- // SignBy is the fingerprint of a GPG key to use for signing the image.
- SignBy string
- // RemoveSignatures causes any existing signatures for the image to be
- // discarded for the pushed copy.
- RemoveSignatures bool
- // MaxRetries is the maximum number of attempts we'll make to push any
- // one image to the external registry if the first attempt fails.
- MaxRetries int
- // RetryDelay is how long to wait before retrying a push attempt.
- RetryDelay time.Duration
- // OciEncryptConfig when non-nil indicates that an image should be encrypted.
- // The encryption options is derived from the construction of EncryptConfig object.
- OciEncryptConfig *encconfig.EncryptConfig
- // OciEncryptLayers represents the list of layers to encrypt.
- // If nil, don't encrypt any layers.
- // If non-nil and len==0, denotes encrypt all layers.
- // integers in the slice represent 0-indexed layer indices, with support for negative
- // indexing. i.e. 0 is the first layer, -1 is the last (top-most) layer.
- OciEncryptLayers *[]int
-}
-
var (
// storageAllowedPolicyScopes overrides the policy for local storage
// to ensure that we can read images from it.
@@ -239,7 +185,7 @@ func (b *Builder) addManifest(ctx context.Context, manifestName string, imageSpe
}
}
- names, err := util.ExpandNames([]string{manifestName}, "", systemContext, b.store)
+ names, err := util.ExpandNames([]string{manifestName}, systemContext, b.store)
if err != nil {
return "", errors.Wrapf(err, "error encountered while expanding image name %q", manifestName)
}
@@ -341,30 +287,6 @@ func (b *Builder) Commit(ctx context.Context, dest types.ImageReference, options
systemContext.OCIInsecureSkipTLSVerify = true
systemContext.DockerDaemonInsecureSkipTLSVerify = true
}
- if len(options.AdditionalTags) > 0 {
- names, err := util.ExpandNames(options.AdditionalTags, "", systemContext, b.store)
- if err != nil {
- return imgID, nil, "", err
- }
- for _, name := range names {
- additionalDest, err := docker.Transport.ParseReference(name)
- if err != nil {
- return imgID, nil, "", errors.Wrapf(err, "error parsing image name %q as an image reference", name)
- }
- insecure, err := checkRegistrySourcesAllows("commit to", additionalDest)
- if err != nil {
- return imgID, nil, "", err
- }
- if insecure {
- if systemContext.DockerInsecureSkipTLSVerify == types.OptionalBoolFalse {
- return imgID, nil, "", errors.Errorf("can't require tls verification on an insecured registry")
- }
- systemContext.DockerInsecureSkipTLSVerify = types.OptionalBoolTrue
- systemContext.OCIInsecureSkipTLSVerify = true
- systemContext.DockerDaemonInsecureSkipTLSVerify = true
- }
- }
- }
logrus.Debugf("committing image with reference %q is allowed by policy", transports.ImageName(dest))
// Check if the base image is already in the destination and it's some kind of local
@@ -495,97 +417,3 @@ func (b *Builder) Commit(ctx context.Context, dest types.ImageReference, options
}
return imgID, ref, manifestDigest, nil
}
-
-// Push copies the contents of the image to a new location.
-func Push(ctx context.Context, image string, dest types.ImageReference, options PushOptions) (reference.Canonical, digest.Digest, error) {
- systemContext := getSystemContext(options.Store, options.SystemContext, options.SignaturePolicyPath)
-
- if options.Quiet {
- options.ReportWriter = nil // Turns off logging output
- }
- blocked, err := isReferenceBlocked(dest, systemContext)
- if err != nil {
- return nil, "", errors.Wrapf(err, "error checking if pushing to registry for %q is blocked", transports.ImageName(dest))
- }
- if blocked {
- return nil, "", errors.Errorf("push access to registry for %q is blocked by configuration", transports.ImageName(dest))
- }
-
- // Load the system signing policy.
- pushPolicy, err := signature.DefaultPolicy(systemContext)
- if err != nil {
- return nil, "", errors.Wrapf(err, "error obtaining default signature policy")
- }
- // Override the settings for local storage to make sure that we can always read the source "image".
- pushPolicy.Transports[is.Transport.Name()] = storageAllowedPolicyScopes
-
- policyContext, err := signature.NewPolicyContext(pushPolicy)
- if err != nil {
- return nil, "", errors.Wrapf(err, "error creating new signature policy context")
- }
- defer func() {
- if err2 := policyContext.Destroy(); err2 != nil {
- logrus.Debugf("error destroying signature policy context: %v", err2)
- }
- }()
-
- // Look up the image.
- src, _, err := util.FindImage(options.Store, "", systemContext, image)
- if err != nil {
- return nil, "", err
- }
- maybeCachedSrc := src
- if options.BlobDirectory != "" {
- compress := types.PreserveOriginal
- if options.Compression != archive.Uncompressed {
- compress = types.Compress
- }
- cache, err := blobcache.NewBlobCache(src, options.BlobDirectory, compress)
- if err != nil {
- return nil, "", errors.Wrapf(err, "error wrapping image reference %q in blob cache at %q", transports.ImageName(src), options.BlobDirectory)
- }
- maybeCachedSrc = cache
- }
-
- // Check if the push is blocked by $BUILDER_REGISTRY_SOURCES.
- insecure, err := checkRegistrySourcesAllows("push to", dest)
- if err != nil {
- return nil, "", err
- }
- if insecure {
- if systemContext.DockerInsecureSkipTLSVerify == types.OptionalBoolFalse {
- return nil, "", errors.Errorf("can't require tls verification on an insecured registry")
- }
- systemContext.DockerInsecureSkipTLSVerify = types.OptionalBoolTrue
- systemContext.OCIInsecureSkipTLSVerify = true
- systemContext.DockerDaemonInsecureSkipTLSVerify = true
- }
- logrus.Debugf("pushing image to reference %q is allowed by policy", transports.ImageName(dest))
-
- // Copy everything.
- switch options.Compression {
- case archive.Uncompressed:
- systemContext.OCIAcceptUncompressedLayers = true
- case archive.Gzip:
- systemContext.DirForceCompress = true
- }
- var manifestBytes []byte
- if manifestBytes, err = retryCopyImage(ctx, policyContext, dest, maybeCachedSrc, dest, getCopyOptions(options.Store, options.ReportWriter, nil, systemContext, options.ManifestType, options.RemoveSignatures, options.SignBy, options.OciEncryptLayers, options.OciEncryptConfig, nil), options.MaxRetries, options.RetryDelay); err != nil {
- return nil, "", errors.Wrapf(err, "error copying layers and metadata from %q to %q", transports.ImageName(maybeCachedSrc), transports.ImageName(dest))
- }
- if options.ReportWriter != nil {
- fmt.Fprintf(options.ReportWriter, "")
- }
- manifestDigest, err := manifest.Digest(manifestBytes)
- if err != nil {
- return nil, "", errors.Wrapf(err, "error computing digest of manifest of new image %q", transports.ImageName(dest))
- }
- var ref reference.Canonical
- if name := dest.DockerReference(); name != nil {
- ref, err = reference.WithDigest(name, manifestDigest)
- if err != nil {
- logrus.Warnf("error generating canonical reference with name %q and digest %s: %v", name, manifestDigest.String(), err)
- }
- }
- return ref, manifestDigest, nil
-}
diff --git a/vendor/github.com/containers/buildah/copier/copier.go b/vendor/github.com/containers/buildah/copier/copier.go
index a37d4635e..8f6821c31 100644
--- a/vendor/github.com/containers/buildah/copier/copier.go
+++ b/vendor/github.com/containers/buildah/copier/copier.go
@@ -70,12 +70,13 @@ func isArchivePath(path string) bool {
type requestType string
const (
- requestEval requestType = "EVAL"
- requestStat requestType = "STAT"
- requestGet requestType = "GET"
- requestPut requestType = "PUT"
- requestMkdir requestType = "MKDIR"
- requestQuit requestType = "QUIT"
+ requestEval requestType = "EVAL"
+ requestStat requestType = "STAT"
+ requestGet requestType = "GET"
+ requestPut requestType = "PUT"
+ requestMkdir requestType = "MKDIR"
+ requestRemove requestType = "REMOVE"
+ requestQuit requestType = "QUIT"
)
// Request encodes a single request.
@@ -88,10 +89,11 @@ type request struct {
preservedDirectory string
Globs []string `json:",omitempty"` // used by stat, get
preservedGlobs []string
- StatOptions StatOptions `json:",omitempty"`
- GetOptions GetOptions `json:",omitempty"`
- PutOptions PutOptions `json:",omitempty"`
- MkdirOptions MkdirOptions `json:",omitempty"`
+ StatOptions StatOptions `json:",omitempty"`
+ GetOptions GetOptions `json:",omitempty"`
+ PutOptions PutOptions `json:",omitempty"`
+ MkdirOptions MkdirOptions `json:",omitempty"`
+ RemoveOptions RemoveOptions `json:",omitempty"`
}
func (req *request) Excludes() []string {
@@ -106,6 +108,8 @@ func (req *request) Excludes() []string {
return nil
case requestMkdir:
return nil
+ case requestRemove:
+ return nil
case requestQuit:
return nil
default:
@@ -125,6 +129,8 @@ func (req *request) UIDMap() []idtools.IDMap {
return req.PutOptions.UIDMap
case requestMkdir:
return req.MkdirOptions.UIDMap
+ case requestRemove:
+ return nil
case requestQuit:
return nil
default:
@@ -144,6 +150,8 @@ func (req *request) GIDMap() []idtools.IDMap {
return req.PutOptions.GIDMap
case requestMkdir:
return req.MkdirOptions.GIDMap
+ case requestRemove:
+ return nil
case requestQuit:
return nil
default:
@@ -153,12 +161,13 @@ func (req *request) GIDMap() []idtools.IDMap {
// Response encodes a single response.
type response struct {
- Error string `json:",omitempty"`
- Stat statResponse
- Eval evalResponse
- Get getResponse
- Put putResponse
- Mkdir mkdirResponse
+ Error string `json:",omitempty"`
+ Stat statResponse `json:",omitempty"`
+ Eval evalResponse `json:",omitempty"`
+ Get getResponse `json:",omitempty"`
+ Put putResponse `json:",omitempty"`
+ Mkdir mkdirResponse `json:",omitempty"`
+ Remove removeResponse `json:",omitempty"`
}
// statResponse encodes a response for a single Stat request.
@@ -205,6 +214,10 @@ type putResponse struct {
type mkdirResponse struct {
}
+// removeResponse encodes a response for a single Remove request.
+type removeResponse struct {
+}
+
// EvalOptions controls parts of Eval()'s behavior.
type EvalOptions struct {
}
@@ -285,6 +298,7 @@ type GetOptions struct {
Rename map[string]string // rename items with the specified names, or under the specified names
NoDerefSymlinks bool // don't follow symlinks when globs match them
IgnoreUnreadable bool // ignore errors reading items, instead of returning an error
+ NoCrossDevice bool // if a subdirectory is a mountpoint with a different device number, include it but skip its contents
}
// Get produces an archive containing items that match the specified glob
@@ -396,6 +410,36 @@ func Mkdir(root string, directory string, options MkdirOptions) error {
return nil
}
+// RemoveOptions controls parts of Remove()'s behavior.
+type RemoveOptions struct {
+ All bool // if Directory is a directory, remove its contents as well
+}
+
+// Remove removes the specified directory or item, traversing any intermediate
+// symbolic links.
+// If the root directory is not specified, the current root directory is used.
+// If root is specified and the current OS supports it, and the calling process
+// has the necessary privileges, the remove() is performed in a chrooted context.
+// If the item to remove is specified as an absolute path, it should either be
+// in the root directory or in a subdirectory of the root directory. Otherwise,
+// the directory is treated as a path relative to the root directory.
+func Remove(root string, item string, options RemoveOptions) error {
+ req := request{
+ Request: requestRemove,
+ Root: root,
+ Directory: item,
+ RemoveOptions: options,
+ }
+ resp, err := copier(nil, nil, req)
+ if err != nil {
+ return err
+ }
+ if resp.Error != "" {
+ return errors.New(resp.Error)
+ }
+ return nil
+}
+
// cleanerReldirectory resolves relative path candidate lexically, attempting
// to ensure that when joined as a subdirectory of another directory, it does
// not reference anything outside of that other directory.
@@ -819,6 +863,9 @@ func copierHandler(bulkReader io.Reader, bulkWriter io.Writer, req request) (*re
return copierHandlerPut(bulkReader, req, idMappings)
case requestMkdir:
return copierHandlerMkdir(req, idMappings)
+ case requestRemove:
+ resp := copierHandlerRemove(req)
+ return resp, nil, nil
case requestQuit:
return nil, nil, nil
}
@@ -859,7 +906,7 @@ func pathIsExcluded(root, path string, pm *fileutils.PatternMatcher) (string, bo
// it is not expected to be.
// This helps us approximate chrooted behavior on systems and in test cases
// where chroot isn't available.
-func resolvePath(root, path string, pm *fileutils.PatternMatcher) (string, error) {
+func resolvePath(root, path string, evaluateFinalComponent bool, pm *fileutils.PatternMatcher) (string, error) {
rel, err := convertToRelSubdirectory(root, path)
if err != nil {
return "", errors.Errorf("error making path %q relative to %q", path, root)
@@ -876,7 +923,7 @@ func resolvePath(root, path string, pm *fileutils.PatternMatcher) (string, error
}
excluded = excluded || thisExcluded
if !excluded {
- if target, err := os.Readlink(filepath.Join(workingPath, components[0])); err == nil {
+ if target, err := os.Readlink(filepath.Join(workingPath, components[0])); err == nil && !(len(components) == 1 && !evaluateFinalComponent) {
followed++
if followed > maxLoopsFollowed {
return "", &os.PathError{
@@ -922,7 +969,7 @@ func copierHandlerEval(req request) *response {
errorResponse := func(fmtspec string, args ...interface{}) *response {
return &response{Error: fmt.Sprintf(fmtspec, args...), Eval: evalResponse{}}
}
- resolvedTarget, err := resolvePath(req.Root, req.Directory, nil)
+ resolvedTarget, err := resolvePath(req.Root, req.Directory, true, nil)
if err != nil {
return errorResponse("copier: eval: error resolving %q: %v", req.Directory, err)
}
@@ -941,11 +988,13 @@ func copierHandlerStat(req request, pm *fileutils.PatternMatcher) *response {
s := StatsForGlob{
Glob: req.preservedGlobs[i],
}
- stats = append(stats, &s)
// glob this pattern
globMatched, err := filepath.Glob(glob)
if err != nil {
s.Error = fmt.Sprintf("copier: stat: %q while matching glob pattern %q", err.Error(), glob)
+ }
+
+ if len(globMatched) == 0 && strings.ContainsAny(glob, "*?[") {
continue
}
// collect the matches
@@ -1001,7 +1050,7 @@ func copierHandlerStat(req request, pm *fileutils.PatternMatcher) *response {
// could be a relative link) and in the context
// of the chroot
result.ImmediateTarget = immediateTarget
- resolvedTarget, err := resolvePath(req.Root, globbed, pm)
+ resolvedTarget, err := resolvePath(req.Root, globbed, true, pm)
if err != nil {
return errorResponse("copier: stat: error resolving %q: %v", globbed, err)
}
@@ -1032,6 +1081,14 @@ func copierHandlerStat(req request, pm *fileutils.PatternMatcher) *response {
s.Results = nil
s.Error = fmt.Sprintf("copier: stat: %q: %v", glob, syscall.ENOENT)
}
+ stats = append(stats, &s)
+ }
+ // no matches -> error
+ if len(stats) == 0 {
+ s := StatsForGlob{
+ Error: fmt.Sprintf("copier: stat: %q: %v", req.Globs, syscall.ENOENT),
+ }
+ stats = append(stats, &s)
}
return &response{Stat: statResponse{Globs: stats}}
}
@@ -1072,6 +1129,10 @@ func copierHandlerGet(bulkWriter io.Writer, req request, pm *fileutils.PatternMa
if len(queue) == 0 {
return errorResponse("copier: get: globs %v matched nothing (%d filtered out): %v", req.Globs, globMatchedCount, syscall.ENOENT)
}
+ topInfo, err := os.Stat(req.Directory)
+ if err != nil {
+ return errorResponse("copier: get: error reading info about directory %q: %v", req.Directory, err)
+ }
cb := func() error {
tw := tar.NewWriter(bulkWriter)
defer tw.Close()
@@ -1168,14 +1229,22 @@ func copierHandlerGet(bulkWriter io.Writer, req request, pm *fileutils.PatternMa
}
symlinkTarget = target
}
+ // if it's a directory and we're staying on one device, and it's on a
+ // different device than the one we started from, skip its contents
+ var ok error
+ if info.Mode().IsDir() && req.GetOptions.NoCrossDevice {
+ if !sameDevice(topInfo, info) {
+ ok = filepath.SkipDir
+ }
+ }
// add the item to the outgoing tar stream
if err := copierHandlerGetOne(info, symlinkTarget, rel, path, options, tw, hardlinkChecker, idMappings); err != nil {
if req.GetOptions.IgnoreUnreadable && errorIsPermission(err) {
- return nil
+ return ok
}
return err
}
- return nil
+ return ok
}
// walk the directory tree, checking/adding items individually
if err := filepath.Walk(item, walkfn); err != nil {
@@ -1463,7 +1532,7 @@ func copierHandlerPut(bulkReader io.Reader, req request, idMappings *idtools.IDM
}
return n, nil
}
- targetDirectory, err := resolvePath(req.Root, req.Directory, nil)
+ targetDirectory, err := resolvePath(req.Root, req.Directory, true, nil)
if err != nil {
return errorResponse("copier: put: error resolving %q: %v", req.Directory, err)
}
@@ -1568,7 +1637,7 @@ func copierHandlerPut(bulkReader io.Reader, req request, idMappings *idtools.IDM
if req.PutOptions.Rename != nil {
hdr.Linkname = handleRename(req.PutOptions.Rename, hdr.Linkname)
}
- if linkTarget, err = resolvePath(targetDirectory, filepath.Join(req.Root, filepath.FromSlash(hdr.Linkname)), nil); err != nil {
+ if linkTarget, err = resolvePath(targetDirectory, filepath.Join(req.Root, filepath.FromSlash(hdr.Linkname)), true, nil); err != nil {
return errors.Errorf("error resolving hardlink target path %q under root %q", hdr.Linkname, req.Root)
}
if err = os.Link(linkTarget, path); err != nil && os.IsExist(err) {
@@ -1742,7 +1811,7 @@ func copierHandlerMkdir(req request, idMappings *idtools.IDMappings) (*response,
dirUID, dirGID = hostDirPair.UID, hostDirPair.GID
}
- directory, err := resolvePath(req.Root, req.Directory, nil)
+ directory, err := resolvePath(req.Root, req.Directory, true, nil)
if err != nil {
return errorResponse("copier: mkdir: error resolving %q: %v", req.Directory, err)
}
@@ -1772,3 +1841,22 @@ func copierHandlerMkdir(req request, idMappings *idtools.IDMappings) (*response,
return &response{Error: "", Mkdir: mkdirResponse{}}, nil, nil
}
+
+func copierHandlerRemove(req request) *response {
+ errorResponse := func(fmtspec string, args ...interface{}) *response {
+ return &response{Error: fmt.Sprintf(fmtspec, args...), Remove: removeResponse{}}
+ }
+ resolvedTarget, err := resolvePath(req.Root, req.Directory, false, nil)
+ if err != nil {
+ return errorResponse("copier: remove: %v", err)
+ }
+ if req.RemoveOptions.All {
+ err = os.RemoveAll(resolvedTarget)
+ } else {
+ err = os.Remove(resolvedTarget)
+ }
+ if err != nil {
+ return errorResponse("copier: remove %q: %v", req.Directory, err)
+ }
+ return &response{Error: "", Remove: removeResponse{}}
+}
diff --git a/vendor/github.com/containers/buildah/copier/syscall_unix.go b/vendor/github.com/containers/buildah/copier/syscall_unix.go
index aa40f327c..9fc8fece3 100644
--- a/vendor/github.com/containers/buildah/copier/syscall_unix.go
+++ b/vendor/github.com/containers/buildah/copier/syscall_unix.go
@@ -4,6 +4,7 @@ package copier
import (
"os"
+ "syscall"
"time"
"github.com/pkg/errors"
@@ -73,6 +74,21 @@ func lutimes(isSymlink bool, path string, atime, mtime time.Time) error {
return unix.Lutimes(path, []unix.Timeval{unix.NsecToTimeval(atime.UnixNano()), unix.NsecToTimeval(mtime.UnixNano())})
}
+// sameDevice returns true unless we're sure that they're not on the same device
+func sameDevice(a, b os.FileInfo) bool {
+ aSys := a.Sys()
+ bSys := b.Sys()
+ if aSys == nil || bSys == nil {
+ return true
+ }
+ au, aok := aSys.(*syscall.Stat_t)
+ bu, bok := bSys.(*syscall.Stat_t)
+ if !aok || !bok {
+ return true
+ }
+ return au.Dev == bu.Dev
+}
+
const (
testModeMask = int64(os.ModePerm)
testIgnoreSymlinkDates = false
diff --git a/vendor/github.com/containers/buildah/copier/syscall_windows.go b/vendor/github.com/containers/buildah/copier/syscall_windows.go
index be50d473d..3a88d2d3e 100644
--- a/vendor/github.com/containers/buildah/copier/syscall_windows.go
+++ b/vendor/github.com/containers/buildah/copier/syscall_windows.go
@@ -77,6 +77,11 @@ func lutimes(isSymlink bool, path string, atime, mtime time.Time) error {
return windows.UtimesNano(path, []windows.Timespec{windows.NsecToTimespec(atime.UnixNano()), windows.NsecToTimespec(mtime.UnixNano())})
}
+// sameDevice returns true since we can't be sure that they're not on the same device
+func sameDevice(a, b os.FileInfo) bool {
+ return true
+}
+
const (
testModeMask = int64(0600)
testIgnoreSymlinkDates = true
diff --git a/vendor/github.com/containers/buildah/define/build.go b/vendor/github.com/containers/buildah/define/build.go
index 635626a64..dd49c47c1 100644
--- a/vendor/github.com/containers/buildah/define/build.go
+++ b/vendor/github.com/containers/buildah/define/build.go
@@ -69,6 +69,8 @@ type CommonBuildOptions struct {
Ulimit []string
// Volumes to bind mount into the container
Volumes []string
+ // Secrets are the available secrets to use in a build
+ Secrets []string
}
// BuildOptions can be used to alter how an image is built.
diff --git a/vendor/github.com/containers/buildah/define/types.go b/vendor/github.com/containers/buildah/define/types.go
index 6d4809cc0..45e85e138 100644
--- a/vendor/github.com/containers/buildah/define/types.go
+++ b/vendor/github.com/containers/buildah/define/types.go
@@ -28,7 +28,7 @@ const (
Package = "buildah"
// Version for the Package. Bump version in contrib/rpm/buildah.spec
// too.
- Version = "1.20.1-dev"
+ Version = "1.20.2-dev"
// DefaultRuntime if containers.conf fails.
DefaultRuntime = "runc"
diff --git a/vendor/github.com/containers/buildah/go.mod b/vendor/github.com/containers/buildah/go.mod
index 075bdfb01..047c0aeba 100644
--- a/vendor/github.com/containers/buildah/go.mod
+++ b/vendor/github.com/containers/buildah/go.mod
@@ -4,19 +4,19 @@ go 1.12
require (
github.com/containernetworking/cni v0.8.1
- github.com/containers/common v0.35.4
- github.com/containers/image/v5 v5.10.5
- github.com/containers/ocicrypt v1.1.0
- github.com/containers/storage v1.28.1
+ github.com/containers/common v0.37.2-0.20210503193405-42134aa138ce
+ github.com/containers/image/v5 v5.11.1
+ github.com/containers/ocicrypt v1.1.1
+ github.com/containers/storage v1.30.1
github.com/docker/distribution v2.7.1+incompatible
github.com/docker/go-units v0.4.0
github.com/docker/libnetwork v0.8.0-dev.2.0.20190625141545-5a177b73e316
github.com/fsouza/go-dockerclient v1.7.2
github.com/ghodss/yaml v1.0.0
github.com/hashicorp/go-multierror v1.1.1
- github.com/ishidawataru/sctp v0.0.0-20191218070446-00ab2ac2db07 // indirect
+ github.com/ishidawataru/sctp v0.0.0-20210226210310-f2269e66cdee // indirect
github.com/mattn/go-shellwords v1.0.11
- github.com/onsi/ginkgo v1.15.2
+ github.com/onsi/ginkgo v1.16.1
github.com/onsi/gomega v1.11.0
github.com/opencontainers/go-digest v1.0.0
github.com/opencontainers/image-spec v1.0.2-0.20190823105129-775207bd45b6
@@ -24,9 +24,8 @@ require (
github.com/opencontainers/runtime-spec v1.0.3-0.20200929063507-e6143ca7d51d
github.com/opencontainers/runtime-tools v0.9.0
github.com/opencontainers/selinux v1.8.0
- github.com/openshift/imagebuilder v1.2.0
+ github.com/openshift/imagebuilder v1.2.2-0.20210415181909-87f3e48c2656
github.com/pkg/errors v0.9.1
- github.com/prometheus/procfs v0.6.0 // indirect
github.com/seccomp/libseccomp-golang v0.9.2-0.20200616122406-847368b35ebf
github.com/sirupsen/logrus v1.8.1
github.com/spf13/cobra v1.1.3
@@ -34,9 +33,9 @@ require (
github.com/stretchr/testify v1.7.0
github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635
go.etcd.io/bbolt v1.3.5
- golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad
+ golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a
- golang.org/x/sys v0.0.0-20210216224549-f992740a1bac
+ golang.org/x/sys v0.0.0-20210324051608-47abb6519492
k8s.io/klog v1.0.0 // indirect
)
diff --git a/vendor/github.com/containers/buildah/go.sum b/vendor/github.com/containers/buildah/go.sum
index 6a48853ac..232a8aac1 100644
--- a/vendor/github.com/containers/buildah/go.sum
+++ b/vendor/github.com/containers/buildah/go.sum
@@ -53,9 +53,11 @@ github.com/Microsoft/hcsshim v0.8.7-0.20190325164909-8abdbb8205e4/go.mod h1:Op3h
github.com/Microsoft/hcsshim v0.8.7/go.mod h1:OHd7sQqRFrYd3RmSgbgji+ctCwkbq2wbEYNSzOYtcBQ=
github.com/Microsoft/hcsshim v0.8.9/go.mod h1:5692vkUqntj1idxauYlpoINNKeqCiG6Sg38RRsjT5y8=
github.com/Microsoft/hcsshim v0.8.14/go.mod h1:NtVKoYxQuTLx6gEq0L96c9Ju4JbRJ4nY2ow3VK6a9Lg=
-github.com/Microsoft/hcsshim v0.8.15 h1:Aof83YILRs2Vx3GhHqlvvfyx1asRJKMFIMeVlHsZKtI=
github.com/Microsoft/hcsshim v0.8.15/go.mod h1:x38A4YbHbdxJtc0sF6oIz+RG0npwSCAvn69iY6URG00=
+github.com/Microsoft/hcsshim v0.8.16 h1:8/auA4LFIZFTGrqfKhGBSXwM6/4X1fHa/xniyEHu8ac=
+github.com/Microsoft/hcsshim v0.8.16/go.mod h1:o5/SZqmR7x9JNKsW3pu+nqHm0MF8vbA+VxGOoXdC600=
github.com/Microsoft/hcsshim/test v0.0.0-20201218223536-d3e5debf77da/go.mod h1:5hlzMzRKMLyo42nCZ9oml8AdTlq/0cvIaBv6tK1RehU=
+github.com/Microsoft/hcsshim/test v0.0.0-20210227013316-43a75bb4edd3/go.mod h1:mw7qgWloBUl75W/gVH3cQszUg1+gUITj7D6NY7ywVnY=
github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
@@ -97,7 +99,6 @@ github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY=
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
-github.com/checkpoint-restore/go-criu/v4 v4.0.2/go.mod h1:xUQBLp4RLc5zJtWY++yjOoMoB5lihDt7fai+75m+rGw=
github.com/checkpoint-restore/go-criu/v4 v4.1.0/go.mod h1:xUQBLp4RLc5zJtWY++yjOoMoB5lihDt7fai+75m+rGw=
github.com/chzyer/logex v1.1.10 h1:Swpa1K6QvQznwJRcfTfQJmTE72DqScAa40E+fbHEXEE=
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
@@ -106,24 +107,26 @@ github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5P
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1 h1:q763qf9huN11kDQavWsoZXJNW3xEE4JJyHa5Q25/sd8=
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
github.com/cilium/ebpf v0.0.0-20200110133405-4032b1d8aae3/go.mod h1:MA5e5Lr8slmEg9bt0VpxxWqJlO4iwu3FBdHUzV7wQVg=
-github.com/cilium/ebpf v0.0.0-20200507155900-a9f01edf17e3/go.mod h1:XT+cAw5wfvsodedcijoh1l9cf7v1x9FlFB/3VmF/O8s=
github.com/cilium/ebpf v0.0.0-20200702112145-1c8d4c9ef775/go.mod h1:7cR51M8ViRLIdUjrmSXlK9pkrsDlLHbO8jiB8X8JnOc=
github.com/cilium/ebpf v0.2.0/go.mod h1:To2CFviqOWL/M0gIMsvSMlqe7em/l1ALkX1PyjrX2Qs=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8=
github.com/containerd/aufs v0.0.0-20200908144142-dab0cbea06f4/go.mod h1:nukgQABAEopAHvB6j7cnP5zJ+/3aVcE7hCYqvIwAHyE=
+github.com/containerd/aufs v0.0.0-20201003224125-76a6863f2989/go.mod h1:AkGGQs9NM2vtYHaUen+NljV0/baGCAPELGm2q9ZXpWU=
+github.com/containerd/aufs v0.0.0-20210316121734-20793ff83c97/go.mod h1:kL5kd6KM5TzQjR79jljyi4olc1Vrx6XBlcyj3gNv2PU=
github.com/containerd/btrfs v0.0.0-20201111183144-404b9149801e/go.mod h1:jg2QkJcsabfHugurUvvPhS3E08Oxiuh5W/g1ybB4e0E=
+github.com/containerd/btrfs v0.0.0-20210316141732-918d888fb676/go.mod h1:zMcX3qkXTAi9GI50+0HOeuV8LU2ryCE/V2vG/ZBiTss=
github.com/containerd/cgroups v0.0.0-20190717030353-c4b9ac5c7601/go.mod h1:X9rLEHIqSf/wfK8NsPqxJmeZgW4pcfzdXITDrUSJ6uI=
github.com/containerd/cgroups v0.0.0-20190919134610-bf292b21730f/go.mod h1:OApqhQ4XNSNC13gXIwDjhOQxjWa/NxkwZXJ1EvqT0ko=
github.com/containerd/cgroups v0.0.0-20200531161412-0dbf7f05ba59/go.mod h1:pA0z1pT8KYB3TCXK/ocprsh7MAkoW8bZVzPdih9snmM=
github.com/containerd/cgroups v0.0.0-20200710171044-318312a37340/go.mod h1:s5q4SojHctfxANBDvMeIaIovkq29IP48TKAxnhYRxvo=
-github.com/containerd/cgroups v0.0.0-20200824123100-0b889c03f102 h1:Qf4HiqfvmB7zS6scsmNgTLmByHbq8n9RTF39v+TzP7A=
github.com/containerd/cgroups v0.0.0-20200824123100-0b889c03f102/go.mod h1:s5q4SojHctfxANBDvMeIaIovkq29IP48TKAxnhYRxvo=
+github.com/containerd/cgroups v0.0.0-20210114181951-8a68de567b68 h1:hkGVFjz+plgr5UfxZUTPFbUFIF/Km6/s+RVRIRHLrrY=
+github.com/containerd/cgroups v0.0.0-20210114181951-8a68de567b68/go.mod h1:ZJeTFisyysqgcCdecO57Dj79RfL0LNeGiFUqLYQRYLE=
github.com/containerd/console v0.0.0-20180822173158-c12b1e7919c1/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw=
github.com/containerd/console v0.0.0-20181022165439-0650fd9eeb50/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw=
github.com/containerd/console v0.0.0-20191206165004-02ecf6a7291e/go.mod h1:8Pf4gM6VEbTNRIT26AyyU7hxdQU3MvAvxVI0sc00XBE=
-github.com/containerd/console v1.0.0/go.mod h1:8Pf4gM6VEbTNRIT26AyyU7hxdQU3MvAvxVI0sc00XBE=
github.com/containerd/console v1.0.1/go.mod h1:XUsP6YE/mKtz6bxc+I8UiKKTP04qjQL4qcS3XoQ5xkw=
github.com/containerd/containerd v1.2.10/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
github.com/containerd/containerd v1.3.0-beta.2.0.20190828155532-0293cbd26c69/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
@@ -131,9 +134,12 @@ github.com/containerd/containerd v1.3.0/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMX
github.com/containerd/containerd v1.3.1-0.20191213020239-082f7e3aed57/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
github.com/containerd/containerd v1.3.2/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
github.com/containerd/containerd v1.4.0-beta.2.0.20200729163537-40b22ef07410/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
+github.com/containerd/containerd v1.4.1/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
github.com/containerd/containerd v1.4.3/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
-github.com/containerd/containerd v1.5.0-beta.1 h1:IK6yirB4X7wpKyFSikWiT++nZsyIxGAAgNEv3fEGuls=
github.com/containerd/containerd v1.5.0-beta.1/go.mod h1:5HfvG1V2FsKesEGQ17k5/T7V960Tmcumvqn8Mc+pCYQ=
+github.com/containerd/containerd v1.5.0-beta.3/go.mod h1:/wr9AVtEM7x9c+n0+stptlo/uBBoBORwEx6ardVcmKU=
+github.com/containerd/containerd v1.5.0-beta.4 h1:zjz4MOAOFgdBlwid2nNUlJ3YLpVi/97L36lfMYJex60=
+github.com/containerd/containerd v1.5.0-beta.4/go.mod h1:GmdgZd2zA2GYIBZ0w09ZvgqEq8EfBp/m3lcVZIvPHhI=
github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
github.com/containerd/continuity v0.0.0-20190815185530-f2a389ac0a02/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
github.com/containerd/continuity v0.0.0-20191127005431-f65d91d395eb/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
@@ -145,12 +151,17 @@ github.com/containerd/fifo v0.0.0-20180307165137-3d5202aec260/go.mod h1:ODA38xgv
github.com/containerd/fifo v0.0.0-20190226154929-a9fb20d87448/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI=
github.com/containerd/fifo v0.0.0-20200410184934-f15a3290365b/go.mod h1:jPQ2IAeZRCYxpS/Cm1495vGFww6ecHmMk1YJH2Q5ln0=
github.com/containerd/fifo v0.0.0-20201026212402-0724c46b320c/go.mod h1:jPQ2IAeZRCYxpS/Cm1495vGFww6ecHmMk1YJH2Q5ln0=
+github.com/containerd/fifo v0.0.0-20210316144830-115abcc95a1d/go.mod h1:ocF/ME1SX5b1AOlWi9r677YJmCPSwwWnQ9O123vzpE4=
github.com/containerd/go-cni v1.0.1/go.mod h1:+vUpYxKvAF72G9i1WoDOiPGRtQpqsNW/ZHtSlv++smU=
github.com/containerd/go-runc v0.0.0-20180907222934-5a6d9f37cfa3/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0=
github.com/containerd/go-runc v0.0.0-20190911050354-e029b79d8cda/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0=
github.com/containerd/go-runc v0.0.0-20200220073739-7016d3ce2328/go.mod h1:PpyHrqVs8FTi9vpyHwPwiNEGaACDxT/N/pLcvMSRA9g=
+github.com/containerd/go-runc v0.0.0-20201020171139-16b287bc67d0/go.mod h1:cNU0ZbCgCQVZK4lgG3P+9tn9/PaJNmoDXPpoJhDR+Ok=
github.com/containerd/imgcrypt v1.0.1/go.mod h1:mdd8cEPW7TPgNG4FpuP3sGBiQ7Yi/zak9TYCG3juvb0=
+github.com/containerd/imgcrypt v1.0.4-0.20210301171431-0ae5c75f59ba/go.mod h1:6TNsg0ctmizkrOgXRNQjAPFWpMYRWuiB6dSF4Pfa5SA=
+github.com/containerd/imgcrypt v1.1.1-0.20210312161619-7ed62a527887/go.mod h1:5AZJNI6sLHJljKuI9IHnw1pWqo/F0nGDOuR9zgTs7ow=
github.com/containerd/nri v0.0.0-20201007170849-eb1350a75164/go.mod h1:+2wGSDGFYfE5+So4M5syatU0N0f0LbWpuqyMi4/BE8c=
+github.com/containerd/nri v0.0.0-20210316161719-dbaa18c31c14/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY=
github.com/containerd/ttrpc v0.0.0-20190828154514-0e0f228740de/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o=
github.com/containerd/ttrpc v0.0.0-20190828172938-92c8520ef9f8/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o=
github.com/containerd/ttrpc v0.0.0-20191028202541-4f1b8fe65a5c/go.mod h1:LPm1u0xBw8r8NOKoOdNMeVHSawSsltak+Ihv+etqsE8=
@@ -160,26 +171,26 @@ github.com/containerd/typeurl v0.0.0-20180627222232-a93fcdb778cd/go.mod h1:Cm3kw
github.com/containerd/typeurl v0.0.0-20190911142611-5eb25027c9fd/go.mod h1:GeKYzf2pQcqv7tJ0AoCuuhtnqhva5LNU3U+OyKxxJpk=
github.com/containerd/typeurl v1.0.1/go.mod h1:TB1hUtrpaiO88KEK56ijojHS1+NeF0izUACaJW2mdXg=
github.com/containerd/zfs v0.0.0-20200918131355-0a33824f23a2/go.mod h1:8IgZOBdv8fAgXddBT4dBXJPtxyRsejFIpXoklgxgEjw=
+github.com/containerd/zfs v0.0.0-20210301145711-11e8f1707f62/go.mod h1:A9zfAbMlQwE+/is6hi0Xw8ktpL+6glmqZYtevJgaB8Y=
+github.com/containerd/zfs v0.0.0-20210315114300-dde8f0fda960/go.mod h1:m+m51S1DvAP6r3FcmYCp54bQ34pyOwTieQDNRIRHsFY=
github.com/containernetworking/cni v0.7.1/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY=
github.com/containernetworking/cni v0.8.0/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY=
github.com/containernetworking/cni v0.8.1 h1:7zpDnQ3T3s4ucOuJ/ZCLrYBxzkg0AELFfII3Epo9TmI=
github.com/containernetworking/cni v0.8.1/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY=
github.com/containernetworking/plugins v0.8.6/go.mod h1:qnw5mN19D8fIwkqW7oHHYDHVlzhJpcY6TQxn/fUyDDM=
-github.com/containers/common v0.35.4 h1:szyWRncsHkBwCVpu1dkEOXUjkwCetlfcLmKJTwo1Sp8=
-github.com/containers/common v0.35.4/go.mod h1:rMzxgD7nMGw++cEbsp+NZv0UJO4rgXbm7F7IbJPTwIE=
-github.com/containers/image/v5 v5.10.5 h1:VK1UbsZMzjdw5Xqr3Im9h4iOqHWU0naFs+I78kavc7I=
-github.com/containers/image/v5 v5.10.5/go.mod h1:SgIbWEedCNBbn2FI5cH0/jed1Ecy2s8XK5zTxvJTzII=
+github.com/containers/common v0.37.2-0.20210503193405-42134aa138ce h1:e7VNmGqwfUQkw+D5bms262x1HYqxfN9/+t5SoaFnwTk=
+github.com/containers/common v0.37.2-0.20210503193405-42134aa138ce/go.mod h1:JjU+yvzIGyx8ZsY8nyf7snzs4VSNh1eIaYsqoSKBoRw=
+github.com/containers/image/v5 v5.11.1 h1:mNybUvU6zXUwcMsQaa3n+Idsru5pV+GE7k4oRuPzYi0=
+github.com/containers/image/v5 v5.11.1/go.mod h1:HC9lhJ/Nz5v3w/5Co7H431kLlgzlVlOC+auD/er3OqE=
github.com/containers/libtrust v0.0.0-20190913040956-14b96171aa3b h1:Q8ePgVfHDplZ7U33NwHZkrVELsZP5fYj9pM5WBZB2GE=
github.com/containers/libtrust v0.0.0-20190913040956-14b96171aa3b/go.mod h1:9rfv8iPl1ZP7aqh9YA68wnZv2NUDbXdcdPHVz0pFbPY=
github.com/containers/ocicrypt v1.0.1/go.mod h1:MeJDzk1RJHv89LjsH0Sp5KTY3ZYkjXO/C+bKAeWFIrc=
-github.com/containers/ocicrypt v1.0.3/go.mod h1:CUBa+8MRNL/VkpxYIpaMtgn1WgXGyvPQj8jcy0EVG6g=
-github.com/containers/ocicrypt v1.1.0 h1:A6UzSUFMla92uxO43O6lm86i7evMGjTY7wTKB2DyGPY=
github.com/containers/ocicrypt v1.1.0/go.mod h1:b8AOe0YR67uU8OqfVNcznfFpAzu3rdgUV4GP9qXPfu4=
-github.com/containers/storage v1.24.8/go.mod h1:YC+2pY8SkfEAcZkwycxYbpK8EiRbx5soPPwz9dxe4IQ=
-github.com/containers/storage v1.28.0 h1:lA/9i9BIjfmIRxCI8GuzasYHmU4IUXVcfZZiDceD0Eg=
-github.com/containers/storage v1.28.0/go.mod h1:ixAwO7Bj31cigqPEG7aCz+PYmxkDxbIFdUFioYdxbzI=
-github.com/containers/storage v1.28.1 h1:axYBD+c0N0YkHelDoqzdLQXfY3fgb8pqIMsRHqUNGts=
-github.com/containers/storage v1.28.1/go.mod h1:5bwiMh2LkrN3AWIfDFMH7A/xbVNLcve+oeXYvHvW8cc=
+github.com/containers/ocicrypt v1.1.1 h1:prL8l9w3ntVqXvNH1CiNn5ENjcCnr38JqpSyvKKB4GI=
+github.com/containers/ocicrypt v1.1.1/go.mod h1:Dm55fwWm1YZAjYRaJ94z2mfZikIyIN4B0oB3dj3jFxY=
+github.com/containers/storage v1.29.0/go.mod h1:u84RU4CCufGeJBNTRNwMB+FoE+AiFeFw4SsMoqAOeCM=
+github.com/containers/storage v1.30.1 h1:+87sZDoUp0uNsP45dWypHTWTEoy0eNDgFYjTU1XIRVQ=
+github.com/containers/storage v1.30.1/go.mod h1:NDJkiwxnSHD1Is+4DGcyR3SIEYSDOa0xnAW+uGQFx9E=
github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
github.com/coreos/go-iptables v0.4.5/go.mod h1:/mVI274lEDI2ns62jHCDnCyBF9Iwsmekav8Dbxlm1MU=
@@ -212,13 +223,14 @@ github.com/denverdino/aliyungo v0.0.0-20190125010748-a747050bb1ba/go.mod h1:dV8l
github.com/dgrijalva/jwt-go v0.0.0-20170104182250-a601269ab70c/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
+github.com/disiqueira/gotree/v3 v3.0.2 h1:ik5iuLQQoufZBNPY518dXhiO5056hyNBIK9lWhkNRq8=
+github.com/disiqueira/gotree/v3 v3.0.2/go.mod h1:ZuyjE4+mUQZlbpkI24AmruZKhg3VHEgPLDY8Qk+uUu8=
github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E=
github.com/docker/distribution v0.0.0-20190905152932-14b96e55d84c/go.mod h1:0+TTO4EOBfRPhZXAeF1Vu+W3hHZ8eLp8PgKVZlcvtFY=
github.com/docker/distribution v2.7.1-0.20190205005809-0d3efadf0154+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
github.com/docker/distribution v2.7.1+incompatible h1:a5mlkVzth6W5A4fOsS3D2EO5BUmsJpcB+cRlLU7cSug=
github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
github.com/docker/docker v1.4.2-0.20191219165747-a9416c67da9f/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
-github.com/docker/docker v17.12.0-ce-rc1.0.20201020191947-73dc6a680cdd+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/docker v20.10.3-0.20210216175712-646072ed6524+incompatible h1:Yu2uGErhwEoOT/OxAFe+/SiJCqRLs+pgcS5XKrDXnG4=
github.com/docker/docker v20.10.3-0.20210216175712-646072ed6524+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/docker-credential-helpers v0.6.3 h1:zI2p9+1NQYdnG6sMU26EX4aVGlqbInSQxQXLvzJ4RPQ=
@@ -279,6 +291,7 @@ github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8
github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
+github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE=
github.com/godbus/dbus v0.0.0-20151105175453-c7fdd8b5cd55/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw=
github.com/godbus/dbus v0.0.0-20180201030542-885f9cc04c9c/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw=
github.com/godbus/dbus v0.0.0-20190422162347-ade71ed3457e h1:BWhy2j3IXJhjCbC68FptL43tDKIq8FladmaTs3Xs7Z8=
@@ -321,6 +334,8 @@ github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QD
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.4.3 h1:JjCZWpVbqXDqFVmTfYWEVTMIYrL/NPdPSCHPJ0T/raM=
github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
+github.com/golang/snappy v0.0.3 h1:fHPg5GQYlCeLIPB9BZqMVR5nR9A+IM5zcgeTdjMYmLA=
+github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
@@ -333,6 +348,8 @@ github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-intervals v0.0.2 h1:FGrVEiUnTRKR8yE04qzXYaJMtnIYqobR5QbblK3ixcM=
+github.com/google/go-intervals v0.0.2/go.mod h1:MkaR3LNRfeKLPmqgJYs4E66z5InYjmCjbbr4TQlcT6Y=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
@@ -373,7 +390,6 @@ github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjh
github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM=
github.com/hashicorp/go-multierror v0.0.0-20161216184304-ed905158d874/go.mod h1:JMRHfdO9jKNzS/+BTlxCjKNQHg/jZAft8U7LloJvN7I=
github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk=
-github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA=
github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo=
github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM=
github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU=
@@ -398,9 +414,11 @@ github.com/imdario/mergo v0.3.11 h1:3tnifQM4i+fbajXKBHXWEH+KvNHqojZ778UH75j3bGA=
github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM=
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
-github.com/ishidawataru/sctp v0.0.0-20191218070446-00ab2ac2db07 h1:rw3IAne6CDuVFlZbPOkA7bhxlqawFh7RJJ+CejfMaxE=
-github.com/ishidawataru/sctp v0.0.0-20191218070446-00ab2ac2db07/go.mod h1:co9pwDoBCm1kGxawmb4sPq0cSIOOWNPT4KnHotMP1Zg=
+github.com/ishidawataru/sctp v0.0.0-20210226210310-f2269e66cdee h1:PAXLXk1heNZ5yokbMBpVLZQxo43wCZxRwl00mX+dd44=
+github.com/ishidawataru/sctp v0.0.0-20210226210310-f2269e66cdee/go.mod h1:co9pwDoBCm1kGxawmb4sPq0cSIOOWNPT4KnHotMP1Zg=
github.com/j-keck/arping v0.0.0-20160618110441-2cf9dc699c56/go.mod h1:ymszkNOg6tORTn+6F6j+Jc8TOr5osrynvN6ivFWZ2GA=
+github.com/jinzhu/copier v0.3.0 h1:P5zN9OYSxmtzZmwgcVmt5Iu8egfP53BGMPAFgEksKPI=
+github.com/jinzhu/copier v0.3.0/go.mod h1:24xnZezI2Yqac9J61UC6/dG/k76ttpq0DdJI3QmUvro=
github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
github.com/jmespath/go-jmespath v0.0.0-20160803190731-bd40a432e4c7/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
@@ -418,15 +436,15 @@ github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQL
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/klauspost/compress v1.11.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
-github.com/klauspost/compress v1.11.5/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
-github.com/klauspost/compress v1.11.7/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
-github.com/klauspost/compress v1.11.12 h1:famVnQVu7QwryBN4jNseQdUKES71ZAOnB6UQQJPZvqk=
-github.com/klauspost/compress v1.11.12/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
+github.com/klauspost/compress v1.11.13/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
+github.com/klauspost/compress v1.12.2 h1:2KCfW3I9M7nSc5wOqXAlW2v2U6v+w6cbjvbfp+OykW8=
+github.com/klauspost/compress v1.12.2/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg=
github.com/klauspost/pgzip v1.2.5 h1:qnWYvvKqedOF2ulHpMG72XQol4ILEJ8k2wwRl/Km8oE=
github.com/klauspost/pgzip v1.2.5/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
-github.com/konsorten/go-windows-terminal-sequences v1.0.2 h1:DB17ag19krx9CFsz4o3enTrPXyIXCl+2iCXH/aMAp9s=
github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
+github.com/konsorten/go-windows-terminal-sequences v1.0.3 h1:CE8S1cTafDpPvMhIxNJKvHsGVBgn1xWYf1NbHQhywc8=
+github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pretty v0.2.0 h1:s5hAObm+yFO5uHYt5dYjxi2rXrsnmRpJx4OYvIWUaQs=
@@ -437,6 +455,7 @@ github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/lunixbochs/vtclean v0.0.0-20180621232353-2d01aacdc34a h1:weJVJJRzAJBFRlAiJQROKQs8oC9vOxvm4rZmBBk0ONw=
github.com/lunixbochs/vtclean v0.0.0-20180621232353-2d01aacdc34a/go.mod h1:pHhQNgMf3btfWnGBVipUOjRYhoOsdGqdm/+2c2E2WMI=
+github.com/magefile/mage v1.10.0/go.mod h1:z5UZb/iS3GoOSn0JgWuiw7dxlurVYTu+/jHXqQg881A=
github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
@@ -450,10 +469,9 @@ github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNx
github.com/mattn/go-isatty v0.0.4 h1:bnP0vzxcAdeI1zdubAl5PjU6zsERjGZb7raWodagDYs=
github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
-github.com/mattn/go-runewidth v0.0.9 h1:Lm995f3rfxdpd6TSmuVCHVb/QhupuXlYr8sCI/QdE+0=
-github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
+github.com/mattn/go-runewidth v0.0.10 h1:CoZ3S2P7pvtP45xOtBw+/mDL2z0RKI576gSkzRRpdGg=
+github.com/mattn/go-runewidth v0.0.10/go.mod h1:RAqKPSqVFrSLVXbA8x7dzmKdmGzieGRCM46jaSJTDAk=
github.com/mattn/go-shellwords v1.0.3/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o=
-github.com/mattn/go-shellwords v1.0.10/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lLtQsUlTZDWQ8Y=
github.com/mattn/go-shellwords v1.0.11 h1:vCoR9VPpsk/TZFW2JwK5I9S0xdrtUq2bph6/YjEPnaw=
github.com/mattn/go-shellwords v1.0.11/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lLtQsUlTZDWQ8Y=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
@@ -462,7 +480,6 @@ github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182aff
github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
github.com/miekg/pkcs11 v1.0.3 h1:iMwmD7I5225wv84WxIG/bmxz9AXjWvTWIbM/TYHvWtw=
github.com/miekg/pkcs11 v1.0.3/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs=
-github.com/mistifyio/go-zfs v2.1.1+incompatible/go.mod h1:8AuVvqP/mXw1px98n46wfvcGfQ4ci2FwoAjKYxuo3Z4=
github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible h1:aKW/4cBs+yK6gpqU3K/oIwk9Q/XICqd3zOX/UFuvqmk=
github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible/go.mod h1:8AuVvqP/mXw1px98n46wfvcGfQ4ci2FwoAjKYxuo3Z4=
github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=
@@ -476,7 +493,6 @@ github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh
github.com/mitchellh/osext v0.0.0-20151018003038-5e2d6d41470f/go.mod h1:OkQIRizQZAeMln+1tSwduZz7+Af5oFlKirV/MSYes2A=
github.com/moby/sys/mount v0.2.0 h1:WhCW5B355jtxndN5ovugJlMFJawbUODuW8fSnEH6SSM=
github.com/moby/sys/mount v0.2.0/go.mod h1:aAivFE2LB3W4bACsUXChRHQ0qKWsetY4Y9V7sxOougM=
-github.com/moby/sys/mountinfo v0.1.3/go.mod h1:w2t2Avltqx8vE7gX5l+QiBKxODu2TX0+Syr3h52Tw4o=
github.com/moby/sys/mountinfo v0.4.0/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A=
github.com/moby/sys/mountinfo v0.4.1 h1:1O+1cHA1aujwEwwVMa2Xm2l+gIpUHyd3+D+d7LZh1kM=
github.com/moby/sys/mountinfo v0.4.1/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A=
@@ -490,7 +506,6 @@ github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lN
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A=
github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc=
-github.com/mrunalp/fileutils v0.0.0-20171103030105-7d4729fb3618/go.mod h1:x8F1gnqOkIEiO4rqoeEEEqQbo7HjGMTvyoq3gej4iT0=
github.com/mrunalp/fileutils v0.5.0/go.mod h1:M1WthSahJixYnrXQl/DFQuteStB1weuxD2QJNHXfbSQ=
github.com/mtrmac/gpgme v0.1.2 h1:dNOmvYmsrakgW7LcgiprD0yfRuQQe8/C8F6Z+zogO3s=
github.com/mtrmac/gpgme v0.1.2/go.mod h1:GYYHnGSuS7HK3zVS2n3y73y0okK/BeKzwnn5jgiVFNI=
@@ -511,8 +526,8 @@ github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+
github.com/onsi/ginkgo v1.10.3/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
-github.com/onsi/ginkgo v1.15.2 h1:l77YT15o814C2qVL47NOyjV/6RbaP7kKdrvZnxQ3Org=
-github.com/onsi/ginkgo v1.15.2/go.mod h1:Dd6YFfwBW84ETqqtL0CPyPXillHgY6XhQH3uuCCTr/o=
+github.com/onsi/ginkgo v1.16.1 h1:foqVmeWDD6yYpK+Yz3fHyNIxFYNxswxqNFjSKe+vI54=
+github.com/onsi/ginkgo v1.16.1/go.mod h1:CObGmKUOKaSC0RjmoAK7tKyn4Azo5P2IWuoMnvwxz1E=
github.com/onsi/gomega v0.0.0-20151007035656-2152b45fa28a/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
@@ -534,25 +549,22 @@ github.com/opencontainers/runc v0.0.0-20190115041553-12f6a991201f/go.mod h1:qT5X
github.com/opencontainers/runc v0.1.1/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
github.com/opencontainers/runc v1.0.0-rc8.0.20190926000215-3e425f80a8c9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
github.com/opencontainers/runc v1.0.0-rc9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
-github.com/opencontainers/runc v1.0.0-rc91/go.mod h1:3Sm6Dt7OT8z88EbdQqqcRN2oCT54jbi72tT/HqgflT8=
github.com/opencontainers/runc v1.0.0-rc93 h1:x2UMpOOVf3kQ8arv/EsDGwim8PTNqzL1/EYDr/+scOM=
github.com/opencontainers/runc v1.0.0-rc93/go.mod h1:3NOsor4w32B2tC0Zbl8Knk4Wg84SM2ImC1fxBuqJ/H0=
github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
github.com/opencontainers/runtime-spec v1.0.1/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
github.com/opencontainers/runtime-spec v1.0.2-0.20190207185410-29686dbc5559/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
-github.com/opencontainers/runtime-spec v1.0.3-0.20200520003142-237cc4f519e2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
github.com/opencontainers/runtime-spec v1.0.3-0.20200929063507-e6143ca7d51d h1:pNa8metDkwZjb9g4T8s+krQ+HRgZAkqnXml+wNir/+s=
github.com/opencontainers/runtime-spec v1.0.3-0.20200929063507-e6143ca7d51d/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
github.com/opencontainers/runtime-tools v0.0.0-20181011054405-1d69bd0f9c39/go.mod h1:r3f7wjNzSs2extwzU3Y+6pKfobzPh+kKFJ3ofN+3nfs=
github.com/opencontainers/runtime-tools v0.9.0 h1:FYgwVsKRI/H9hU32MJ/4MLOzXWodKK5zsQavY8NPMkU=
github.com/opencontainers/runtime-tools v0.9.0/go.mod h1:r3f7wjNzSs2extwzU3Y+6pKfobzPh+kKFJ3ofN+3nfs=
-github.com/opencontainers/selinux v1.5.1/go.mod h1:yTcKuYAh6R95iDpefGLQaPaRwJFwyzAJufJyiTt7s0g=
github.com/opencontainers/selinux v1.6.0/go.mod h1:VVGKuOLlE7v4PJyT6h7mNWvq1rzqiriPsEqVhc+svHE=
github.com/opencontainers/selinux v1.8.0 h1:+77ba4ar4jsCbL1GLbFL8fFM57w6suPfSS9PDLDY7KM=
github.com/opencontainers/selinux v1.8.0/go.mod h1:RScLhm78qiWa2gbVCcGkC7tCGdgk3ogry1nUQF8Evvo=
-github.com/openshift/imagebuilder v1.2.0 h1:uoZFjJICLlTMjlAL/UG2PA2kM8RjAsVflGfHJK7MMDk=
-github.com/openshift/imagebuilder v1.2.0/go.mod h1:9aJRczxCH0mvT6XQ+5STAQaPWz7OsWcU5/mRkt8IWeo=
+github.com/openshift/imagebuilder v1.2.2-0.20210415181909-87f3e48c2656 h1:WaxyNFpmIDu4i6so9r6LVFIbSaXqsj8oitMitt86ae4=
+github.com/openshift/imagebuilder v1.2.2-0.20210415181909-87f3e48c2656/go.mod h1:9aJRczxCH0mvT6XQ+5STAQaPWz7OsWcU5/mRkt8IWeo=
github.com/ostreedev/ostree-go v0.0.0-20190702140239-759a8c1ac913 h1:TnbXhKzrTOyuvWrjI8W6pcoI9XPbLHFXCdN2dtUw7Rw=
github.com/ostreedev/ostree-go v0.0.0-20190702140239-759a8c1ac913/go.mod h1:J6OG6YJVEWopen4avK3VNQSnALmmjvniMmni/YFYAwc=
github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
@@ -599,11 +611,13 @@ github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDa
github.com/prometheus/procfs v0.0.5/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ=
github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
-github.com/prometheus/procfs v0.2.0 h1:wH4vA7pcjKuZzjF7lM8awk4fnuJO6idemZXoKnULUx4=
github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
github.com/prometheus/procfs v0.6.0 h1:mxy4L2jP6qMonqmq+aTtOx1ifVWUgG/TAmntgbh3xv4=
github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
+github.com/rivo/uniseg v0.1.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
+github.com/rivo/uniseg v0.2.0 h1:S1pD9weZBuJdFmowNwbpi7BJ8TNftyUImj/0WQi72jY=
+github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
@@ -647,6 +661,7 @@ github.com/stretchr/testify v0.0.0-20180303142811-b89eecf5ca5d/go.mod h1:a8OnRci
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
+github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
@@ -660,8 +675,6 @@ github.com/tchap/go-patricia v2.3.0+incompatible h1:GkY4dP3cEfEASBPPkWd+AmjYxhmD
github.com/tchap/go-patricia v2.3.0+incompatible/go.mod h1:bmLyhP68RS6kStMGxByiQ23RP/odRBOTVjwp2cDyi6I=
github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
-github.com/ulikunitz/xz v0.5.9 h1:RsKRIA2MO8x56wkkcd3LbtcE/uMszhb6DpRf+3uwa3I=
-github.com/ulikunitz/xz v0.5.9/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14=
github.com/ulikunitz/xz v0.5.10 h1:t92gobL9l3HE202wg3rlk19F6X+JOxl9BBrCCMYEYd8=
github.com/ulikunitz/xz v0.5.10/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14=
github.com/urfave/cli v0.0.0-20171014202726-7bc6a0acffa5/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
@@ -670,8 +683,8 @@ github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtX
github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
github.com/vbatts/tar-split v0.11.1 h1:0Odu65rhcZ3JZaPHxl7tCI3V/C/Q9Zf82UFravl02dE=
github.com/vbatts/tar-split v0.11.1/go.mod h1:LEuURwDEiWjRjwu46yU3KVGuUdVv/dcnpcEPSzR8z6g=
-github.com/vbauerster/mpb/v5 v5.4.0 h1:n8JPunifvQvh6P1D1HAl2Ur9YcmKT1tpoUuiea5mlmg=
-github.com/vbauerster/mpb/v5 v5.4.0/go.mod h1:fi4wVo7BVQ22QcvFObm+VwliQXlV1eBT8JDaKXR4JGI=
+github.com/vbauerster/mpb/v6 v6.0.3 h1:j+twHHhSUe8aXWaT/27E98G5cSBeqEuJSVCMjmLg0PI=
+github.com/vbauerster/mpb/v6 v6.0.3/go.mod h1:5luBx4rDLWxpA4t6I5sdeeQuZhqDxc+wr5Nqf35+tnM=
github.com/vishvananda/netlink v0.0.0-20181108222139-023a6dafdcdf/go.mod h1:+SR5DhBJrl6ZM7CoCKvpw5BKroDKQ+PJqOg65H/2ktk=
github.com/vishvananda/netlink v1.1.0 h1:1iyaYNBLmP6L0220aDnYQpo1QEV4t4hJ+xEEhhJH8j0=
github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE=
@@ -720,12 +733,11 @@ golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8U
golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
-golang.org/x/crypto v0.0.0-20200423211502-4bdfaf469ed5/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
-golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad h1:DN0cp81fZ3njFcrLCytUHRSUkqBjfTo4Tx9RJTWs0EY=
-golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
+golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2 h1:It14KIkyBFYkHkwZ7k45minvA9aorojkyjGk9KJ5B/w=
+golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
@@ -791,8 +803,9 @@ golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81R
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
-golang.org/x/net v0.0.0-20201224014010-6772e930b67b h1:iFwSg7t5GZmB/Q5TjiEAsdoLDrdJRC1RiF2WhuV29Qw=
golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
+golang.org/x/net v0.0.0-20210226172049-e18ecbb05110 h1:qWPm9rbaAMKs8Bq/9LRpbMqxWRVUAQwMI9fVrssnTfw=
+golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@@ -804,9 +817,7 @@ golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9 h1:SQFwaSi55rU7vdNs9Yr0Z324VNlrF+0wMqRXT4St8ck=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a h1:DcqTD9SDLc+1P/r1EmRBwnVsrOwW+kk2vWf9n+1sGhs=
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -855,8 +866,8 @@ golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200327173247-9dae0f8f5775/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200622214017-ed371f2e16b4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200728102440-3e129f6d46b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200817155316-9781c653f443/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200831180312-196b9ba8737a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -868,13 +879,12 @@ golang.org/x/sys v0.0.0-20201112073958-5cba982894dd/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201202213521-69691e467435/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20201218084310-7d0127a74742/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210216224549-f992740a1bac h1:9glrpwtNjBYgRpb67AZJKHfzj1stG/8BL5H7In2oTC4=
golang.org/x/sys v0.0.0-20210216224549-f992740a1bac/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210324051608-47abb6519492 h1:Paq34FxTluEPvVyayQqMPgHm+vTOrIifmcYxFBx9TLg=
+golang.org/x/sys v0.0.0-20210324051608-47abb6519492/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/term v0.0.0-20201113234701-d7a72108b828/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
-golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1 h1:v+OssWQX+hTHEmOBgwxdZxK4zHq3yOs8F9J7mk0PY8E=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
diff --git a/vendor/github.com/containers/buildah/image.go b/vendor/github.com/containers/buildah/image.go
index 51d18232a..92e0c3e8e 100644
--- a/vendor/github.com/containers/buildah/image.go
+++ b/vendor/github.com/containers/buildah/image.go
@@ -295,7 +295,7 @@ func (i *containerImageRef) NewImageSource(ctx context.Context, sc *types.System
if src == nil {
err2 := os.RemoveAll(path)
if err2 != nil {
- logrus.Errorf("error removing layer blob directory %q: %v", path, err)
+ logrus.Errorf("error removing layer blob directory: %v", err)
}
}
}()
diff --git a/vendor/github.com/containers/buildah/imagebuildah/build.go b/vendor/github.com/containers/buildah/imagebuildah/build.go
index 062752274..62e656271 100644
--- a/vendor/github.com/containers/buildah/imagebuildah/build.go
+++ b/vendor/github.com/containers/buildah/imagebuildah/build.go
@@ -165,11 +165,22 @@ func BuildDockerfiles(ctx context.Context, store storage.Store, options define.B
}
func warnOnUnsetBuildArgs(node *parser.Node, args map[string]string) {
+ argFound := make(map[string]bool)
for _, child := range node.Children {
switch strings.ToUpper(child.Value) {
case "ARG":
argName := child.Next.Value
- if _, ok := args[argName]; !strings.Contains(argName, "=") && !ok {
+ if strings.Contains(argName, "=") {
+ res := strings.Split(argName, "=")
+ if res[1] != "" {
+ argFound[res[0]] = true
+ }
+ }
+ argHasValue := true
+ if !strings.Contains(argName, "=") {
+ argHasValue = argFound[argName]
+ }
+ if _, ok := args[argName]; !argHasValue && !ok {
logrus.Warnf("missing %q build argument. Try adding %q to the command line", argName, fmt.Sprintf("--build-arg %s=<VALUE>", argName))
}
default:
diff --git a/vendor/github.com/containers/buildah/imagebuildah/chroot_symlink_linux.go b/vendor/github.com/containers/buildah/imagebuildah/chroot_symlink_linux.go
deleted file mode 100644
index 4dd49130d..000000000
--- a/vendor/github.com/containers/buildah/imagebuildah/chroot_symlink_linux.go
+++ /dev/null
@@ -1,151 +0,0 @@
-package imagebuildah
-
-import (
- "flag"
- "fmt"
- "os"
- "path/filepath"
- "strings"
-
- "github.com/containers/storage/pkg/reexec"
- "github.com/pkg/errors"
- "golang.org/x/sys/unix"
-)
-
-const (
- symlinkChrootedCommand = "chrootsymlinks-resolve"
- maxSymlinksResolved = 40
-)
-
-func init() {
- reexec.Register(symlinkChrootedCommand, resolveChrootedSymlinks)
-}
-
-// resolveSymlink uses a child subprocess to resolve any symlinks in filename
-// in the context of rootdir.
-func resolveSymlink(rootdir, filename string) (string, error) {
- // The child process expects a chroot and one path that
- // will be consulted relative to the chroot directory and evaluated
- // for any symbolic links present.
- cmd := reexec.Command(symlinkChrootedCommand, rootdir, filename)
- output, err := cmd.CombinedOutput()
- if err != nil {
- return "", errors.Wrapf(err, string(output))
- }
-
- // Hand back the resolved symlink, will be filename if a symlink is not found
- return string(output), nil
-}
-
-// main() for resolveSymlink()'s subprocess.
-func resolveChrootedSymlinks() {
- status := 0
- flag.Parse()
- if len(flag.Args()) < 2 {
- fmt.Fprintf(os.Stderr, "%s needs two arguments\n", symlinkChrootedCommand)
- os.Exit(1)
- }
- // Our first parameter is the directory to chroot into.
- if err := unix.Chdir(flag.Arg(0)); err != nil {
- fmt.Fprintf(os.Stderr, "chdir(): %v\n", err)
- os.Exit(1)
- }
- if err := unix.Chroot(flag.Arg(0)); err != nil {
- fmt.Fprintf(os.Stderr, "chroot(): %v\n", err)
- os.Exit(1)
- }
-
- // Our second parameter is the path name to evaluate for symbolic links
- symLink, err := getSymbolicLink(flag.Arg(1))
- if err != nil {
- fmt.Fprintf(os.Stderr, "error getting symbolic links: %v\n", err)
- os.Exit(1)
- }
- if _, err := os.Stdout.WriteString(symLink); err != nil {
- fmt.Fprintf(os.Stderr, "error writing string to stdout: %v\n", err)
- os.Exit(1)
- }
- os.Exit(status)
-}
-
-// getSymbolic link goes through each part of the path and continues resolving symlinks as they appear.
-// Returns what the whole target path for what "path" resolves to.
-func getSymbolicLink(path string) (string, error) {
- var (
- symPath string
- symLinksResolved int
- )
- // Splitting path as we need to resolve each part of the path at a time
- splitPath := strings.Split(path, "/")
- if splitPath[0] == "" {
- splitPath = splitPath[1:]
- symPath = "/"
- }
- for _, p := range splitPath {
- // If we have resolved 40 symlinks, that means something is terribly wrong
- // will return an error and exit
- if symLinksResolved >= maxSymlinksResolved {
- return "", errors.Errorf("have resolved %q symlinks, something is terribly wrong!", maxSymlinksResolved)
- }
- symPath = filepath.Join(symPath, p)
- isSymlink, resolvedPath, err := hasSymlink(symPath)
- if err != nil {
- return "", err
- }
- // if isSymlink is true, check if resolvedPath is potentially another symlink
- // keep doing this till resolvedPath is not a symlink and isSymlink is false
- for isSymlink {
- // Need to keep track of number of symlinks resolved
- // Will also return an error if the symlink points to itself as that will exceed maxSymlinksResolved
- if symLinksResolved >= maxSymlinksResolved {
- return "", errors.Errorf("have resolved %q symlinks, something is terribly wrong!", maxSymlinksResolved)
- }
- isSymlink, resolvedPath, err = hasSymlink(resolvedPath)
- if err != nil {
- return "", err
- }
- symLinksResolved++
- }
- // Assign resolvedPath to symPath. The next part of the loop will append the next part of the original path
- // and continue resolving
- symPath = resolvedPath
- symLinksResolved++
- }
- return symPath, nil
-}
-
-// hasSymlink returns true and the target if path is symlink
-// otherwise it returns false and path
-func hasSymlink(path string) (bool, string, error) {
- info, err := os.Lstat(path)
- if err != nil {
- if os.IsNotExist(err) {
- if err = os.MkdirAll(path, 0755); err != nil {
- return false, "", err
- }
- info, err = os.Lstat(path)
- if err != nil {
- return false, "", err
- }
- } else {
- return false, path, err
- }
- }
-
- // Return false and path as path if not a symlink
- if info.Mode()&os.ModeSymlink != os.ModeSymlink {
- return false, path, nil
- }
-
- // Read the symlink to get what it points to
- targetDir, err := os.Readlink(path)
- if err != nil {
- return false, "", err
- }
- // if the symlink points to a relative path, prepend the path till now to the resolved path
- if !filepath.IsAbs(targetDir) {
- targetDir = filepath.Join(filepath.Dir(path), targetDir)
- }
- // run filepath.Clean to remove the ".." from relative paths
- return true, filepath.Clean(targetDir), nil
-}
diff --git a/vendor/github.com/containers/buildah/imagebuildah/chroot_symlink_unsupported.go b/vendor/github.com/containers/buildah/imagebuildah/chroot_symlink_unsupported.go
deleted file mode 100644
index 2cec4fe21..000000000
--- a/vendor/github.com/containers/buildah/imagebuildah/chroot_symlink_unsupported.go
+++ /dev/null
@@ -1,13 +0,0 @@
-// +build !linux
-
-package imagebuildah
-
-import "github.com/pkg/errors"
-
-func resolveSymlink(rootdir, filename string) (string, error) {
- return "", errors.New("function not supported on non-linux systems")
-}
-
-func resolveModifiedTime(rootdir, filename, historyTime string) (bool, error) {
- return false, errors.New("function not supported on non-linux systems")
-}
diff --git a/vendor/github.com/containers/buildah/imagebuildah/executor.go b/vendor/github.com/containers/buildah/imagebuildah/executor.go
index b7b339961..fc4753e35 100644
--- a/vendor/github.com/containers/buildah/imagebuildah/executor.go
+++ b/vendor/github.com/containers/buildah/imagebuildah/executor.go
@@ -16,10 +16,12 @@ import (
"github.com/containers/buildah/define"
"github.com/containers/buildah/pkg/parse"
"github.com/containers/buildah/util"
+ "github.com/containers/common/libimage"
"github.com/containers/common/pkg/config"
"github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/manifest"
is "github.com/containers/image/v5/storage"
+ storageTransport "github.com/containers/image/v5/storage"
"github.com/containers/image/v5/transports"
"github.com/containers/image/v5/transports/alltransports"
"github.com/containers/image/v5/types"
@@ -117,6 +119,7 @@ type Executor struct {
imageInfoCache map[string]imageTypeAndHistoryAndDiffIDs
fromOverride string
manifest string
+ secrets map[string]string
}
type imageTypeAndHistoryAndDiffIDs struct {
@@ -164,6 +167,11 @@ func NewExecutor(store storage.Store, options define.BuildOptions, mainNode *par
transientMounts = append([]Mount{Mount(mount)}, transientMounts...)
}
+ secrets, err := parse.Secrets(options.CommonBuildOpts.Secrets)
+ if err != nil {
+ return nil, err
+ }
+
jobs := 1
if options.Jobs != nil {
jobs = *options.Jobs
@@ -234,6 +242,7 @@ func NewExecutor(store storage.Store, options define.BuildOptions, mainNode *par
imageInfoCache: make(map[string]imageTypeAndHistoryAndDiffIDs),
fromOverride: options.From,
manifest: options.Manifest,
+ secrets: secrets,
}
if exec.err == nil {
exec.err = os.Stderr
@@ -301,22 +310,23 @@ func (b *Executor) startStage(ctx context.Context, stage *imagebuilder.Stage, st
// resolveNameToImageRef creates a types.ImageReference for the output name in local storage
func (b *Executor) resolveNameToImageRef(output string) (types.ImageReference, error) {
- imageRef, err := alltransports.ParseImageName(output)
+ if imageRef, err := alltransports.ParseImageName(output); err == nil {
+ return imageRef, nil
+ }
+ runtime, err := libimage.RuntimeFromStore(b.store, &libimage.RuntimeOptions{SystemContext: b.systemContext})
if err != nil {
- candidates, _, _, err := util.ResolveName(output, "", b.systemContext, b.store)
- if err != nil {
- return nil, errors.Wrapf(err, "error parsing target image name %q", output)
- }
- if len(candidates) == 0 {
- return nil, errors.Errorf("error parsing target image name %q", output)
- }
- imageRef2, err2 := is.Transport.ParseStoreReference(b.store, candidates[0])
- if err2 != nil {
- return nil, errors.Wrapf(err, "error parsing target image name %q", output)
- }
- return imageRef2, nil
+ return nil, err
}
- return imageRef, nil
+ resolved, err := runtime.ResolveName(output)
+ if err != nil {
+ return nil, err
+ }
+ imageRef, err := storageTransport.Transport.ParseStoreReference(b.store, resolved)
+ if err == nil {
+ return imageRef, nil
+ }
+
+ return imageRef, err
}
// waitForStage waits for an entry to be added to terminatedStage indicating
@@ -661,19 +671,31 @@ func (b *Executor) Build(ctx context.Context, stages imagebuilder.Stages) (image
fmt.Fprintf(b.out, "[Warning] one or more build args were not consumed: %v\n", unusedList)
}
- if len(b.additionalTags) > 0 {
- if dest, err := b.resolveNameToImageRef(b.output); err == nil {
- switch dest.Transport().Name() {
- case is.Transport.Name():
- img, err := is.Transport.GetStoreImage(b.store, dest)
- if err != nil {
- return imageID, ref, errors.Wrapf(err, "error locating just-written image %q", transports.ImageName(dest))
- }
+ // Add additional tags and print image names recorded in storage
+ if dest, err := b.resolveNameToImageRef(b.output); err == nil {
+ switch dest.Transport().Name() {
+ case is.Transport.Name():
+ img, err := is.Transport.GetStoreImage(b.store, dest)
+ if err != nil {
+ return imageID, ref, errors.Wrapf(err, "error locating just-written image %q", transports.ImageName(dest))
+ }
+ if len(b.additionalTags) > 0 {
if err = util.AddImageNames(b.store, "", b.systemContext, img, b.additionalTags); err != nil {
return imageID, ref, errors.Wrapf(err, "error setting image names to %v", append(img.Names, b.additionalTags...))
}
logrus.Debugf("assigned names %v to image %q", img.Names, img.ID)
- default:
+ }
+ // Report back the caller the tags applied, if any.
+ img, err = is.Transport.GetStoreImage(b.store, dest)
+ if err != nil {
+ return imageID, ref, errors.Wrapf(err, "error locating just-written image %q", transports.ImageName(dest))
+ }
+ for _, name := range img.Names {
+ fmt.Fprintf(b.out, "Successfully tagged %s\n", name)
+ }
+
+ default:
+ if len(b.additionalTags) > 0 {
logrus.Warnf("don't know how to add tags to images stored in %q transport", dest.Transport().Name())
}
}
diff --git a/vendor/github.com/containers/buildah/imagebuildah/stage_executor.go b/vendor/github.com/containers/buildah/imagebuildah/stage_executor.go
index ff9abdda8..f1bee9366 100644
--- a/vendor/github.com/containers/buildah/imagebuildah/stage_executor.go
+++ b/vendor/github.com/containers/buildah/imagebuildah/stage_executor.go
@@ -24,7 +24,7 @@ import (
"github.com/containers/image/v5/transports"
"github.com/containers/image/v5/types"
"github.com/containers/storage"
- "github.com/containers/storage/pkg/archive"
+ "github.com/containers/storage/pkg/chrootarchive"
docker "github.com/fsouza/go-dockerclient"
digest "github.com/opencontainers/go-digest"
v1 "github.com/opencontainers/image-spec/specs-go/v1"
@@ -81,12 +81,12 @@ func (s *StageExecutor) Preserve(path string) error {
// This path is already a subdirectory of a volume path that
// we're already preserving, so there's nothing new to be done
// except ensure that it exists.
- archivedPath := filepath.Join(s.mountPoint, path)
- if err := os.MkdirAll(archivedPath, 0755); err != nil {
+ createdDirPerms := os.FileMode(0755)
+ if err := copier.Mkdir(s.mountPoint, filepath.Join(s.mountPoint, path), copier.MkdirOptions{ChmodNew: &createdDirPerms}); err != nil {
return errors.Wrapf(err, "error ensuring volume path exists")
}
if err := s.volumeCacheInvalidate(path); err != nil {
- return errors.Wrapf(err, "error ensuring volume path %q is preserved", archivedPath)
+ return errors.Wrapf(err, "error ensuring volume path %q is preserved", filepath.Join(s.mountPoint, path))
}
return nil
}
@@ -102,16 +102,24 @@ func (s *StageExecutor) Preserve(path string) error {
// Try and resolve the symlink (if one exists)
// Set archivedPath and path based on whether a symlink is found or not
- if symLink, err := resolveSymlink(s.mountPoint, path); err == nil {
- archivedPath = filepath.Join(s.mountPoint, symLink)
- path = symLink
+ if evaluated, err := copier.Eval(s.mountPoint, filepath.Join(s.mountPoint, path), copier.EvalOptions{}); err == nil {
+ symLink, err := filepath.Rel(s.mountPoint, evaluated)
+ if err != nil {
+ return errors.Wrapf(err, "making evaluated path %q relative to %q", evaluated, s.mountPoint)
+ }
+ if strings.HasPrefix(symLink, ".."+string(os.PathSeparator)) {
+ return errors.Errorf("evaluated path %q was not below %q", evaluated, s.mountPoint)
+ }
+ archivedPath = evaluated
+ path = string(os.PathSeparator) + symLink
} else {
- return errors.Wrapf(err, "error reading symbolic link to %q", path)
+ return errors.Wrapf(err, "error evaluating path %q", path)
}
st, err := os.Stat(archivedPath)
if os.IsNotExist(err) {
- if err = os.MkdirAll(archivedPath, 0755); err != nil {
+ createdDirPerms := os.FileMode(0755)
+ if err = copier.Mkdir(s.mountPoint, archivedPath, copier.MkdirOptions{ChmodNew: &createdDirPerms}); err != nil {
return errors.Wrapf(err, "error ensuring volume path exists")
}
st, err = os.Stat(archivedPath)
@@ -178,64 +186,85 @@ func (s *StageExecutor) volumeCacheInvalidate(path string) error {
return err
}
archivedPath := filepath.Join(s.mountPoint, cachedPath)
- logrus.Debugf("invalidated volume cache for %q from %q", archivedPath, s.volumeCache[cachedPath])
- delete(s.volumeCache, cachedPath)
+ logrus.Debugf("invalidated volume cache %q for %q from %q", archivedPath, path, s.volumeCache[cachedPath])
}
return nil
}
// Save the contents of each of the executor's list of volumes for which we
// don't already have a cache file.
-func (s *StageExecutor) volumeCacheSaveVFS() error {
+func (s *StageExecutor) volumeCacheSaveVFS() (mounts []specs.Mount, err error) {
for cachedPath, cacheFile := range s.volumeCache {
- archivedPath := filepath.Join(s.mountPoint, cachedPath)
- _, err := os.Stat(cacheFile)
+ archivedPath, err := copier.Eval(s.mountPoint, filepath.Join(s.mountPoint, cachedPath), copier.EvalOptions{})
+ if err != nil {
+ return nil, errors.Wrapf(err, "error evaluating volume path")
+ }
+ relativePath, err := filepath.Rel(s.mountPoint, archivedPath)
+ if err != nil {
+ return nil, errors.Wrapf(err, "error converting %q into a path relative to %q", archivedPath, s.mountPoint)
+ }
+ if strings.HasPrefix(relativePath, ".."+string(os.PathSeparator)) {
+ return nil, errors.Errorf("error converting %q into a path relative to %q", archivedPath, s.mountPoint)
+ }
+ _, err = os.Stat(cacheFile)
if err == nil {
logrus.Debugf("contents of volume %q are already cached in %q", archivedPath, cacheFile)
continue
}
if !os.IsNotExist(err) {
- return err
+ return nil, err
}
- if err := os.MkdirAll(archivedPath, 0755); err != nil {
- return errors.Wrapf(err, "error ensuring volume path exists")
+ createdDirPerms := os.FileMode(0755)
+ if err := copier.Mkdir(s.mountPoint, archivedPath, copier.MkdirOptions{ChmodNew: &createdDirPerms}); err != nil {
+ return nil, errors.Wrapf(err, "error ensuring volume path exists")
}
logrus.Debugf("caching contents of volume %q in %q", archivedPath, cacheFile)
cache, err := os.Create(cacheFile)
if err != nil {
- return err
+ return nil, err
}
defer cache.Close()
- rc, err := archive.Tar(archivedPath, archive.Uncompressed)
+ rc, err := chrootarchive.Tar(archivedPath, nil, s.mountPoint)
if err != nil {
- return errors.Wrapf(err, "error archiving %q", archivedPath)
+ return nil, errors.Wrapf(err, "error archiving %q", archivedPath)
}
defer rc.Close()
_, err = io.Copy(cache, rc)
if err != nil {
- return errors.Wrapf(err, "error archiving %q to %q", archivedPath, cacheFile)
+ return nil, errors.Wrapf(err, "error archiving %q to %q", archivedPath, cacheFile)
+ }
+ mount := specs.Mount{
+ Source: archivedPath,
+ Destination: string(os.PathSeparator) + relativePath,
+ Type: "bind",
+ Options: []string{"private"},
}
+ mounts = append(mounts, mount)
}
- return nil
+ return nil, nil
}
// Restore the contents of each of the executor's list of volumes.
func (s *StageExecutor) volumeCacheRestoreVFS() (err error) {
for cachedPath, cacheFile := range s.volumeCache {
- archivedPath := filepath.Join(s.mountPoint, cachedPath)
+ archivedPath, err := copier.Eval(s.mountPoint, filepath.Join(s.mountPoint, cachedPath), copier.EvalOptions{})
+ if err != nil {
+ return errors.Wrapf(err, "error evaluating volume path")
+ }
logrus.Debugf("restoring contents of volume %q from %q", archivedPath, cacheFile)
cache, err := os.Open(cacheFile)
if err != nil {
return err
}
defer cache.Close()
- if err := os.RemoveAll(archivedPath); err != nil {
+ if err := copier.Remove(s.mountPoint, archivedPath, copier.RemoveOptions{All: true}); err != nil {
return err
}
- if err := os.MkdirAll(archivedPath, 0755); err != nil {
+ createdDirPerms := os.FileMode(0755)
+ if err := copier.Mkdir(s.mountPoint, archivedPath, copier.MkdirOptions{ChmodNew: &createdDirPerms}); err != nil {
return err
}
- err = archive.Untar(cache, archivedPath, nil)
+ err = chrootarchive.Untar(cache, archivedPath, nil)
if err != nil {
return errors.Wrapf(err, "error extracting archive at %q", archivedPath)
}
@@ -264,6 +293,10 @@ func (s *StageExecutor) volumeCacheRestoreVFS() (err error) {
// don't already have a cache file.
func (s *StageExecutor) volumeCacheSaveOverlay() (mounts []specs.Mount, err error) {
for cachedPath := range s.volumeCache {
+ err = copier.Mkdir(s.mountPoint, filepath.Join(s.mountPoint, cachedPath), copier.MkdirOptions{})
+ if err != nil {
+ return nil, errors.Wrapf(err, "ensuring volume exists")
+ }
volumePath := filepath.Join(s.mountPoint, cachedPath)
mount := specs.Mount{
Source: volumePath,
@@ -287,7 +320,7 @@ func (s *StageExecutor) volumeCacheSave() (mounts []specs.Mount, err error) {
case "overlay":
return s.volumeCacheSaveOverlay()
}
- return nil, s.volumeCacheSaveVFS()
+ return s.volumeCacheSaveVFS()
}
// Reset the contents of each of the executor's list of volumes.
@@ -372,7 +405,7 @@ func (s *StageExecutor) Copy(excludes []string, copies ...imagebuilder.Copy) err
StripSetgidBit: stripSetgid,
}
if err := s.builder.Add(copy.Dest, copy.Download, options, sources...); err != nil {
- return errors.Wrapf(err, "error adding sources %v", sources)
+ return err
}
}
return nil
@@ -411,6 +444,8 @@ func (s *StageExecutor) Run(run imagebuilder.Run, config docker.Config) error {
Quiet: s.executor.quiet,
NamespaceOptions: s.executor.namespaceOptions,
Terminal: buildah.WithoutTerminal,
+ Secrets: s.executor.secrets,
+ RunMounts: run.Mounts,
}
if config.NetworkDisabled {
options.ConfigureNetwork = buildah.NetworkDisabled
diff --git a/vendor/github.com/containers/buildah/install.md b/vendor/github.com/containers/buildah/install.md
index 90e844c3e..4dc362911 100644
--- a/vendor/github.com/containers/buildah/install.md
+++ b/vendor/github.com/containers/buildah/install.md
@@ -4,19 +4,6 @@
## Installing packaged versions of buildah
-#### [Amazon Linux 2](https://aws.amazon.com/amazon-linux-2/)
-
-The [Kubic project](https://build.opensuse.org/project/show/devel:kubic:libcontainers:stable)
-provides updated packages for CentOS 7 which can be used unmodified on Amazon Linux 2.
-
-```bash
-cd /etc/yum.repos.d/
-sudo wget https://download.opensuse.org/repositories/devel:kubic:libcontainers:stable/CentOS_7/devel:kubic:libcontainers:stable.repo
-sudo yum -y install yum-plugin-copr
-sudo yum -y copr enable lsm5/container-selinux
-sudo yum -y install buildah
-```
-
### [Arch Linux](https://www.archlinux.org)
```bash
@@ -34,26 +21,28 @@ sudo yum -y install buildah
```
The [Kubic project](https://build.opensuse.org/project/show/devel:kubic:libcontainers:stable)
-provides updated packages for CentOS 7, 8 and Stream.
+provides updated packages for CentOS 8 and CentOS 8 Stream.
```bash
-# CentOS 7
-sudo curl -L -o /etc/yum.repos.d/devel:kubic:libcontainers:stable.repo https://download.opensuse.org/repositories/devel:kubic:libcontainers:stable/CentOS_7/devel:kubic:libcontainers:stable.repo
-sudo yum -y install buildah
-
# CentOS 8
sudo dnf -y module disable container-tools
sudo dnf -y install 'dnf-command(copr)'
sudo dnf -y copr enable rhcontainerbot/container-selinux
sudo curl -L -o /etc/yum.repos.d/devel:kubic:libcontainers:stable.repo https://download.opensuse.org/repositories/devel:kubic:libcontainers:stable/CentOS_8/devel:kubic:libcontainers:stable.repo
-sudo dnf -y install buildah
+# OPTIONAL FOR RUNC USERS: crun will be installed by default. Install runc first if you prefer runc
+sudo dnf -y --refresh install runc
+# Install Buildah
+sudo dnf -y --refresh install buildah
-# CentOS Stream
+# CentOS 8 Stream
sudo dnf -y module disable container-tools
sudo dnf -y install 'dnf-command(copr)'
sudo dnf -y copr enable rhcontainerbot/container-selinux
sudo curl -L -o /etc/yum.repos.d/devel:kubic:libcontainers:stable.repo https://download.opensuse.org/repositories/devel:kubic:libcontainers:stable/CentOS_8_Stream/devel:kubic:libcontainers:stable.repo
-sudo dnf -y install buildah
+# OPTIONAL FOR RUNC USERS: crun will be installed by default. Install runc first if you prefer runc
+sudo dnf -y --refresh install runc
+# Install Buildah
+sudo dnf -y --refresh install buildah
```
@@ -69,36 +58,6 @@ sudo apt-get update
sudo apt-get -y install buildah
```
-If you would prefer newer (though not as well-tested) packages,
-the [Kubic project](https://build.opensuse.org/package/show/devel:kubic:libcontainers:stable/buildah)
-provides packages for Debian 10 and newer. The packages in Kubic project repos are more frequently
-updated than the one in Debian's official repositories, due to how Debian works.
-The build sources for the Kubic packages can be found [here](https://gitlab.com/rhcontainerbot/buildah/-/tree/debian/debian).
-
-CAUTION: On Debian 11 and newer, including Testing and Sid/Unstable, we highly recommend you use Buildah, Podman and Skopeo ONLY from EITHER the Kubic repo
-OR the official Debian repos. Mixing and matching may lead to unpredictable situations including installation conflicts.
-
-```bash
-# Debian 10
-echo 'deb https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/Debian_10/ /' > /etc/apt/sources.list.d/devel:kubic:libcontainers:stable.list
-curl -L https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/Debian_10/Release.key | sudo apt-key add -
-sudo apt-get update
-sudo apt-get -y install buildah
-
-# Debian Testing
-echo 'deb https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/Debian_Testing/ /' > /etc/apt/sources.list.d/devel:kubic:libcontainers:stable.list
-curl -L https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/Debian_Testing/Release.key | sudo apt-key add -
-sudo apt-get update
-sudo apt-get -y install buildah
-
-# Debian Sid/Unstable
-echo 'deb https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/Debian_Unstable/ /' > /etc/apt/sources.list.d/devel:kubic:libcontainers:stable.list
-curl -L https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/Debian_Unstable/Release.key | sudo apt-key add -
-sudo apt-get update
-sudo apt-get -y install buildah
-```
-
-
### [Fedora](https://www.fedoraproject.org)
@@ -143,21 +102,6 @@ sudo subscription-manager repos --enable=rhel-7-server-extras-rpms
sudo yum -y install buildah
```
-#### [Raspberry Pi OS armhf (ex Raspbian)](https://www.raspberrypi.org/downloads/raspberry-pi-os/)
-
-The [Kubic project](https://build.opensuse.org/package/show/devel:kubic:libcontainers:stable/buildah) provides
-packages for Raspbian 10.
-
-```bash
-# Raspbian 10
-echo 'deb https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/Raspbian_10/ /' | sudo tee /etc/apt/sources.list.d/devel:kubic:libcontainers:stable.list
-curl -L https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/Raspbian_10/Release.key | sudo apt-key add -
-sudo apt-get update -qq
-sudo apt-get -qq -y install buildah
-```
-
-The build sources for the Kubic packages can be found [here](https://gitlab.com/rhcontainerbot/buildah/-/tree/debian/debian).
-
#### [Raspberry Pi OS arm64 (beta)](https://downloads.raspberrypi.org/raspios_arm64/images/)
Raspberry Pi OS use the standard Debian's repositories,
@@ -185,7 +129,7 @@ sudo apt-get -y install buildah
If you would prefer newer (though not as well-tested) packages,
the [Kubic project](https://build.opensuse.org/package/show/devel:kubic:libcontainers:stable/buildah)
-provides packages for active Ubuntu releases 18.04 and newer (it should also work with direct derivatives like Pop!\_OS).
+provides packages for active Ubuntu releases 20.04 and newer (it should also work with direct derivatives like Pop!\_OS).
The packages in Kubic project repos are more frequently updated than the one in Ubuntu's official repositories, due to how Debian/Ubuntu works.
Checkout the Kubic project page for a list of supported Ubuntu version and architecture combinations.
The build sources for the Kubic packages can be found [here](https://gitlab.com/rhcontainerbot/buildah/-/tree/debian/debian).
diff --git a/vendor/github.com/containers/buildah/new.go b/vendor/github.com/containers/buildah/new.go
index f29af1f5d..0293e4abd 100644
--- a/vendor/github.com/containers/buildah/new.go
+++ b/vendor/github.com/containers/buildah/new.go
@@ -4,18 +4,15 @@ import (
"context"
"fmt"
"math/rand"
- "runtime"
"strings"
"github.com/containers/buildah/define"
- "github.com/containers/buildah/util"
- "github.com/containers/image/v5/docker"
+ "github.com/containers/buildah/pkg/blobcache"
+ "github.com/containers/common/libimage"
+ "github.com/containers/common/pkg/config"
"github.com/containers/image/v5/image"
"github.com/containers/image/v5/manifest"
- "github.com/containers/image/v5/pkg/shortnames"
- is "github.com/containers/image/v5/storage"
"github.com/containers/image/v5/transports"
- "github.com/containers/image/v5/transports/alltransports"
"github.com/containers/image/v5/types"
"github.com/containers/storage"
digest "github.com/opencontainers/go-digest"
@@ -30,29 +27,6 @@ const (
BaseImageFakeName = imagebuilder.NoBaseImageSpecifier
)
-func pullAndFindImage(ctx context.Context, store storage.Store, srcRef types.ImageReference, options BuilderOptions, sc *types.SystemContext) (*storage.Image, types.ImageReference, error) {
- pullOptions := PullOptions{
- ReportWriter: options.ReportWriter,
- Store: store,
- SystemContext: options.SystemContext,
- BlobDirectory: options.BlobDirectory,
- MaxRetries: options.MaxPullRetries,
- RetryDelay: options.PullRetryDelay,
- OciDecryptConfig: options.OciDecryptConfig,
- }
- ref, err := pullImage(ctx, store, srcRef, pullOptions, sc)
- if err != nil {
- logrus.Debugf("error pulling image %q: %v", transports.ImageName(srcRef), err)
- return nil, nil, err
- }
- img, err := is.Transport.GetStoreImage(store, ref)
- if err != nil {
- logrus.Debugf("error reading pulled image %q: %v", transports.ImageName(srcRef), err)
- return nil, nil, errors.Wrapf(err, "error locating image %q in local storage", transports.ImageName(ref))
- }
- return img, ref, nil
-}
-
func getImageName(name string, img *storage.Image) string {
imageName := name
if len(img.Names) > 0 {
@@ -105,187 +79,6 @@ func newContainerIDMappingOptions(idmapOptions *define.IDMappingOptions) storage
return options
}
-func resolveLocalImage(systemContext *types.SystemContext, store storage.Store, options BuilderOptions) (types.ImageReference, string, string, *storage.Image, error) {
- candidates, _, _, err := util.ResolveName(options.FromImage, options.Registry, systemContext, store)
- if err != nil {
- return nil, "", "", nil, errors.Wrapf(err, "error resolving local image %q", options.FromImage)
- }
- for _, imageName := range candidates {
- img, err := store.Image(imageName)
- if err != nil {
- if errors.Cause(err) == storage.ErrImageUnknown {
- continue
- }
- return nil, "", "", nil, err
- }
- ref, err := is.Transport.ParseStoreReference(store, img.ID)
- if err != nil {
- return nil, "", "", nil, errors.Wrapf(err, "error parsing reference to image %q", img.ID)
- }
- return ref, ref.Transport().Name(), imageName, img, nil
- }
-
- return nil, "", "", nil, nil
-}
-
-func imageMatch(ctx context.Context, ref types.ImageReference, systemContext *types.SystemContext) bool {
- img, err := ref.NewImage(ctx, systemContext)
- if err != nil {
- logrus.Warnf("Failed to create newImage in imageMatch: %v", err)
- return false
- }
- defer img.Close()
- data, err := img.Inspect(ctx)
- if err != nil {
- logrus.Warnf("Failed to inspect img %s: %v", ref, err)
- return false
- }
- os := systemContext.OSChoice
- if os == "" {
- os = runtime.GOOS
- }
- arch := systemContext.ArchitectureChoice
- if arch == "" {
- arch = runtime.GOARCH
- }
- if os == data.Os && arch == data.Architecture {
- if systemContext.VariantChoice == "" || systemContext.VariantChoice == data.Variant {
- return true
- }
- }
- return false
-}
-
-func resolveImage(ctx context.Context, systemContext *types.SystemContext, store storage.Store, options BuilderOptions) (types.ImageReference, string, *storage.Image, error) {
- if systemContext == nil {
- systemContext = &types.SystemContext{}
- }
-
- fromImage := options.FromImage
- // If the image name includes a transport we can use it as it. Special
- // treatment for docker references which are subject to pull policies
- // that we're handling below.
- srcRef, err := alltransports.ParseImageName(options.FromImage)
- if err == nil {
- if srcRef.Transport().Name() == docker.Transport.Name() {
- fromImage = srcRef.DockerReference().String()
- } else {
- pulledImg, pulledReference, err := pullAndFindImage(ctx, store, srcRef, options, systemContext)
- return pulledReference, srcRef.Transport().Name(), pulledImg, err
- }
- }
-
- localImageRef, _, localImageName, localImage, err := resolveLocalImage(systemContext, store, options)
- if err != nil {
- return nil, "", nil, err
- }
-
- // If we could resolve the image locally, check if it was clearly
- // referring to a local image, either by ID or digest. In that case,
- // we don't need to perform a remote lookup.
- if localImage != nil && (strings.HasPrefix(localImage.ID, options.FromImage) || strings.HasPrefix(options.FromImage, "sha256:")) {
- return localImageRef, localImageRef.Transport().Name(), localImage, nil
- }
-
- if options.PullPolicy == define.PullNever || options.PullPolicy == define.PullIfMissing {
- if localImage != nil && imageMatch(ctx, localImageRef, systemContext) {
- return localImageRef, localImageRef.Transport().Name(), localImage, nil
- }
- if options.PullPolicy == define.PullNever {
- return nil, "", nil, errors.Errorf("pull policy is %q but %q could not be found locally", "never", options.FromImage)
- }
- }
-
- // If we found a local image, we must use it's name.
- // See #2904.
- if localImageRef != nil {
- fromImage = localImageName
- }
-
- resolved, err := shortnames.Resolve(systemContext, fromImage)
- if err != nil {
- return nil, "", nil, err
- }
-
- // Print the image-resolution description unless we're looking for a
- // new image and already found a local image. In many cases, the
- // description will be more confusing than helpful (e.g., `buildah from
- // localImage`).
- if desc := resolved.Description(); len(desc) > 0 {
- logrus.Debug(desc)
- if !(options.PullPolicy == define.PullIfNewer && localImage != nil) {
- if options.ReportWriter != nil {
- if _, err := options.ReportWriter.Write([]byte(desc + "\n")); err != nil {
- return nil, "", nil, err
- }
- }
- }
- }
-
- var pullErrors []error
- for _, pullCandidate := range resolved.PullCandidates {
- ref, err := docker.NewReference(pullCandidate.Value)
- if err != nil {
- return nil, "", nil, err
- }
-
- // We're tasked to pull a "newer" image. If there's no local
- // image, we have no base for comparison, so we'll pull the
- // first available image.
- //
- // If there's a local image, the `pullCandidate` is considered
- // to be newer if its time stamp differs from the local one.
- // Otherwise, we don't pull and skip it.
- if options.PullPolicy == define.PullIfNewer && localImage != nil {
- remoteImage, err := ref.NewImage(ctx, systemContext)
- if err != nil {
- logrus.Debugf("unable to remote-inspect image %q: %v", pullCandidate.Value.String(), err)
- pullErrors = append(pullErrors, err)
- continue
- }
- defer remoteImage.Close()
-
- remoteData, err := remoteImage.Inspect(ctx)
- if err != nil {
- logrus.Debugf("unable to remote-inspect image %q: %v", pullCandidate.Value.String(), err)
- pullErrors = append(pullErrors, err)
- continue
- }
-
- // FIXME: we should compare image digests not time stamps.
- // Comparing time stamps is flawed. Be aware that fixing
- // it may entail non-trivial changes to the tests. Please
- // refer to https://github.com/containers/buildah/issues/2779
- // for more.
- if localImage.Created.Equal(*remoteData.Created) {
- continue
- }
- }
-
- pulledImg, pulledReference, err := pullAndFindImage(ctx, store, ref, options, systemContext)
- if err != nil {
- logrus.Debugf("unable to pull and read image %q: %v", pullCandidate.Value.String(), err)
- pullErrors = append(pullErrors, err)
- continue
- }
-
- // Make sure to record the short-name alias if necessary.
- if err = pullCandidate.Record(); err != nil {
- return nil, "", nil, err
- }
-
- return pulledReference, "", pulledImg, nil
- }
-
- // If we were looking for a newer image but could not find one, return
- // the local image if present.
- if options.PullPolicy == define.PullIfNewer && localImage != nil {
- return localImageRef, localImageRef.Transport().Name(), localImage, nil
- }
-
- return nil, "", nil, resolved.FormatPullErrors(pullErrors)
-}
-
func containerNameExist(name string, containers []storage.Container) bool {
for _, container := range containers {
for _, cname := range container.Names {
@@ -313,6 +106,7 @@ func newBuilder(ctx context.Context, store storage.Store, options BuilderOptions
img *storage.Image
err error
)
+
if options.FromImage == BaseImageFakeName {
options.FromImage = ""
}
@@ -320,11 +114,45 @@ func newBuilder(ctx context.Context, store storage.Store, options BuilderOptions
systemContext := getSystemContext(store, options.SystemContext, options.SignaturePolicyPath)
if options.FromImage != "" && options.FromImage != "scratch" {
- ref, _, img, err = resolveImage(ctx, systemContext, store, options)
+ imageRuntime, err := libimage.RuntimeFromStore(store, &libimage.RuntimeOptions{SystemContext: systemContext})
+ if err != nil {
+ return nil, err
+ }
+
+ pullPolicy, err := config.ParsePullPolicy(options.PullPolicy.String())
if err != nil {
return nil, err
}
+
+ // Note: options.Format does *not* relate to the image we're
+ // about to pull (see tests/digests.bats). So we're not
+ // forcing a MIMEType in the pullOptions below.
+ pullOptions := libimage.PullOptions{}
+ pullOptions.RetryDelay = &options.PullRetryDelay
+ pullOptions.OciDecryptConfig = options.OciDecryptConfig
+ pullOptions.SignaturePolicyPath = options.SignaturePolicyPath
+ pullOptions.Writer = options.ReportWriter
+
+ maxRetries := uint(options.MaxPullRetries)
+ pullOptions.MaxRetries = &maxRetries
+
+ if options.BlobDirectory != "" {
+ pullOptions.DestinationLookupReferenceFunc = blobcache.CacheLookupReferenceFunc(options.BlobDirectory, types.PreserveOriginal)
+ }
+
+ pulledImages, err := imageRuntime.Pull(ctx, options.FromImage, pullPolicy, &pullOptions)
+ if err != nil {
+ return nil, err
+ }
+ if len(pulledImages) > 0 {
+ img = pulledImages[0].StorageImage()
+ ref, err = pulledImages[0].StorageReference()
+ if err != nil {
+ return nil, err
+ }
+ }
}
+
imageSpec := options.FromImage
imageID := ""
imageDigest := ""
diff --git a/vendor/github.com/containers/buildah/pkg/blobcache/blobcache.go b/vendor/github.com/containers/buildah/pkg/blobcache/blobcache.go
index f3876cd13..8dadec130 100644
--- a/vendor/github.com/containers/buildah/pkg/blobcache/blobcache.go
+++ b/vendor/github.com/containers/buildah/pkg/blobcache/blobcache.go
@@ -10,6 +10,7 @@ import (
"sync"
"github.com/containers/buildah/docker"
+ "github.com/containers/common/libimage"
"github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/image"
"github.com/containers/image/v5/manifest"
@@ -82,6 +83,21 @@ func makeFilename(blobSum digest.Digest, isConfig bool) string {
return blobSum.String()
}
+// CacheLookupReferenceFunc wraps a BlobCache into a
+// libimage.LookupReferenceFunc to allow for using a BlobCache during
+// image-copy operations.
+func CacheLookupReferenceFunc(directory string, compress types.LayerCompression) libimage.LookupReferenceFunc {
+ // NOTE: this prevents us from moving BlobCache around and generalizes
+ // the libimage API.
+ return func(ref types.ImageReference) (types.ImageReference, error) {
+ ref, err := NewBlobCache(ref, directory, compress)
+ if err != nil {
+ return nil, errors.Wrapf(err, "error using blobcache %q", directory)
+ }
+ return ref, nil
+ }
+}
+
// NewBlobCache creates a new blob cache that wraps an image reference. Any blobs which are
// written to the destination image created from the resulting reference will also be stored
// as-is to the specified directory or a temporary directory. The cache directory's contents
@@ -141,7 +157,7 @@ func (r *blobCacheReference) HasBlob(blobinfo types.BlobInfo) (bool, int64, erro
return true, fileInfo.Size(), nil
}
if !os.IsNotExist(err) {
- return false, -1, errors.Wrapf(err, "error checking size of %q", filename)
+ return false, -1, errors.Wrap(err, "checking size")
}
}
@@ -155,7 +171,7 @@ func (r *blobCacheReference) Directory() string {
func (r *blobCacheReference) ClearCache() error {
f, err := os.Open(r.directory)
if err != nil {
- return errors.Wrapf(err, "error opening directory %q", r.directory)
+ return errors.WithStack(err)
}
defer f.Close()
names, err := f.Readdirnames(-1)
@@ -165,7 +181,7 @@ func (r *blobCacheReference) ClearCache() error {
for _, name := range names {
pathname := filepath.Join(r.directory, name)
if err = os.RemoveAll(pathname); err != nil {
- return errors.Wrapf(err, "error removing %q while clearing cache for %q", pathname, transports.ImageName(r))
+ return errors.Wrapf(err, "clearing cache for %q", transports.ImageName(r))
}
}
return nil
@@ -216,7 +232,7 @@ func (s *blobCacheSource) GetManifest(ctx context.Context, instanceDigest *diges
}
if !os.IsNotExist(err) {
s.cacheErrors++
- return nil, "", errors.Wrapf(err, "error checking for manifest file %q", filename)
+ return nil, "", errors.Wrap(err, "checking for manifest file")
}
}
s.cacheMisses++
@@ -246,7 +262,7 @@ func (s *blobCacheSource) GetBlob(ctx context.Context, blobinfo types.BlobInfo,
s.mu.Lock()
s.cacheErrors++
s.mu.Unlock()
- return nil, -1, errors.Wrapf(err, "error checking for cache file %q", filepath.Join(s.reference.directory, filename))
+ return nil, -1, errors.Wrap(err, "checking for cache")
}
}
}
diff --git a/vendor/github.com/containers/buildah/pkg/cli/common.go b/vendor/github.com/containers/buildah/pkg/cli/common.go
index 9c3c8cfe0..6e59dbe64 100644
--- a/vendor/github.com/containers/buildah/pkg/cli/common.go
+++ b/vendor/github.com/containers/buildah/pkg/cli/common.go
@@ -64,7 +64,6 @@ type BudResults struct {
Iidfile string
Label []string
Logfile string
- Loglevel int
Manifest string
NoCache bool
Timestamp int64
@@ -75,6 +74,7 @@ type BudResults struct {
Rm bool
Runtime string
RuntimeFlags []string
+ Secrets []string
SignaturePolicy string
SignBy string
Squash bool
@@ -191,7 +191,10 @@ func GetBudFlags(flags *BudResults) pflag.FlagSet {
fs.IntVar(&flags.Jobs, "jobs", 1, "how many stages to run in parallel")
fs.StringArrayVar(&flags.Label, "label", []string{}, "Set metadata for an image (default [])")
fs.StringVar(&flags.Logfile, "logfile", "", "log to `file` instead of stdout/stderr")
- fs.IntVar(&flags.Loglevel, "loglevel", 0, "adjust logging level (range from -2 to 3)")
+ fs.Int("loglevel", 0, "NO LONGER USED, flag ignored, and hidden")
+ if err := fs.MarkHidden("loglevel"); err != nil {
+ panic(fmt.Sprintf("error marking the loglevel flag as hidden: %v", err))
+ }
fs.BoolVar(&flags.LogRusage, "log-rusage", false, "log resource usage at each build step")
if err := fs.MarkHidden("log-rusage"); err != nil {
panic(fmt.Sprintf("error marking the log-rusage flag as hidden: %v", err))
@@ -207,6 +210,7 @@ func GetBudFlags(flags *BudResults) pflag.FlagSet {
fs.BoolVar(&flags.Rm, "rm", true, "Remove intermediate containers after a successful build")
// "runtime" definition moved to avoid name collision in podman build. Defined in cmd/buildah/bud.go.
fs.StringSliceVar(&flags.RuntimeFlags, "runtime-flag", []string{}, "add global flags for the container runtime")
+ fs.StringArrayVar(&flags.Secrets, "secret", []string{}, "secret file to expose to the build")
fs.StringVar(&flags.SignBy, "sign-by", "", "sign the image using a GPG key with the specified `FINGERPRINT`")
fs.StringVar(&flags.SignaturePolicy, "signature-policy", "", "`pathname` of signature policy file (not usually used)")
if err := fs.MarkHidden("signature-policy"); err != nil {
@@ -240,11 +244,11 @@ func GetBudFlagsCompletions() commonComp.FlagCompletions {
flagCompletion["jobs"] = commonComp.AutocompleteNone
flagCompletion["label"] = commonComp.AutocompleteNone
flagCompletion["logfile"] = commonComp.AutocompleteDefault
- flagCompletion["loglevel"] = commonComp.AutocompleteDefault
flagCompletion["manifest"] = commonComp.AutocompleteDefault
flagCompletion["os"] = commonComp.AutocompleteNone
flagCompletion["platform"] = commonComp.AutocompleteNone
flagCompletion["runtime-flag"] = commonComp.AutocompleteNone
+ flagCompletion["secret"] = commonComp.AutocompleteNone
flagCompletion["sign-by"] = commonComp.AutocompleteNone
flagCompletion["signature-policy"] = commonComp.AutocompleteNone
flagCompletion["tag"] = commonComp.AutocompleteNone
@@ -403,6 +407,8 @@ func AliasFlags(f *pflag.FlagSet, name string) pflag.NormalizedName {
name = "os"
case "purge":
name = "rm"
+ case "tty":
+ name = "terminal"
}
return pflag.NormalizedName(name)
}
diff --git a/vendor/github.com/containers/buildah/pkg/overlay/overlay.go b/vendor/github.com/containers/buildah/pkg/overlay/overlay.go
index 462561983..d1b8955bb 100644
--- a/vendor/github.com/containers/buildah/pkg/overlay/overlay.go
+++ b/vendor/github.com/containers/buildah/pkg/overlay/overlay.go
@@ -174,15 +174,15 @@ func recreate(contentDir string) error {
if os.IsNotExist(err) {
return nil
}
- return errors.Wrapf(err, "failed to stat overlay upper %s directory", contentDir)
+ return errors.Wrap(err, "failed to stat overlay upper directory")
}
if err := os.RemoveAll(contentDir); err != nil {
- return errors.Wrapf(err, "failed to cleanup overlay %s directory", contentDir)
+ return errors.WithStack(err)
}
if err := idtools.MkdirAllAs(contentDir, os.FileMode(st.Mode()), int(st.UID()), int(st.GID())); err != nil {
- return errors.Wrapf(err, "failed to create the overlay %s directory", contentDir)
+ return errors.Wrap(err, "failed to create overlay directory")
}
return nil
}
@@ -208,7 +208,7 @@ func CleanupContent(containerDir string) (Err error) {
if os.IsNotExist(err) {
return nil
}
- return errors.Wrapf(err, "read directory")
+ return errors.Wrap(err, "read directory")
}
for _, f := range files {
dir := filepath.Join(contentDir, f.Name())
@@ -218,7 +218,7 @@ func CleanupContent(containerDir string) (Err error) {
}
if err := os.RemoveAll(contentDir); err != nil && !os.IsNotExist(err) {
- return errors.Wrapf(err, "failed to cleanup overlay %s directory", contentDir)
+ return errors.Wrap(err, "failed to cleanup overlay directory")
}
return nil
}
diff --git a/vendor/github.com/containers/buildah/pkg/parse/parse.go b/vendor/github.com/containers/buildah/pkg/parse/parse.go
index 2ae07efe9..462ac212e 100644
--- a/vendor/github.com/containers/buildah/pkg/parse/parse.go
+++ b/vendor/github.com/containers/buildah/pkg/parse/parse.go
@@ -125,6 +125,8 @@ func CommonBuildOptions(c *cobra.Command) (*define.CommonBuildOptions, error) {
ulimit, _ = c.Flags().GetStringSlice("ulimit")
}
+ secrets, _ := c.Flags().GetStringArray("secret")
+
commonOpts := &define.CommonBuildOptions{
AddHost: addHost,
CPUPeriod: cpuPeriod,
@@ -142,6 +144,7 @@ func CommonBuildOptions(c *cobra.Command) (*define.CommonBuildOptions, error) {
ShmSize: c.Flag("shm-size").Value.String(),
Ulimit: ulimit,
Volumes: volumes,
+ Secrets: secrets,
}
securityOpts, _ := c.Flags().GetStringArray("security-opt")
if err := parseSecurityOpts(securityOpts, commonOpts); err != nil {
@@ -178,11 +181,11 @@ func parseSecurityOpts(securityOpts []string, commonOpts *define.CommonBuildOpti
commonOpts.SeccompProfilePath = SeccompOverridePath
} else {
if !os.IsNotExist(err) {
- return errors.Wrapf(err, "can't check if %q exists", SeccompOverridePath)
+ return errors.WithStack(err)
}
if _, err := os.Stat(SeccompDefaultPath); err != nil {
if !os.IsNotExist(err) {
- return errors.Wrapf(err, "can't check if %q exists", SeccompDefaultPath)
+ return errors.WithStack(err)
}
} else {
commonOpts.SeccompProfilePath = SeccompDefaultPath
@@ -454,7 +457,7 @@ func ValidateVolumeHostDir(hostDir string) error {
}
if filepath.IsAbs(hostDir) {
if _, err := os.Stat(hostDir); err != nil {
- return errors.Wrapf(err, "error checking path %q", hostDir)
+ return errors.WithStack(err)
}
}
// If hostDir is not an absolute path, that means the user wants to create a
@@ -468,7 +471,7 @@ func validateVolumeMountHostDir(hostDir string) error {
return errors.Errorf("invalid host path, must be an absolute path %q", hostDir)
}
if _, err := os.Stat(hostDir); err != nil {
- return errors.Wrapf(err, "error checking path %q", hostDir)
+ return errors.WithStack(err)
}
return nil
}
@@ -587,6 +590,14 @@ func SystemContextFromOptions(c *cobra.Command) (*types.SystemContext, error) {
ctx.OCIInsecureSkipTLSVerify = !tlsVerify
ctx.DockerDaemonInsecureSkipTLSVerify = !tlsVerify
}
+ disableCompression, err := c.Flags().GetBool("disable-compression")
+ if err == nil {
+ if disableCompression {
+ ctx.OCIAcceptUncompressedLayers = true
+ } else {
+ ctx.DirForceCompress = true
+ }
+ }
creds, err := c.Flags().GetString("creds")
if err == nil && c.Flag("creds").Changed {
var err error
@@ -832,7 +843,7 @@ func IDMappingOptions(c *cobra.Command, isolation define.Isolation) (usernsOptio
default:
how = strings.TrimPrefix(how, "ns:")
if _, err := os.Stat(how); err != nil {
- return nil, nil, errors.Wrapf(err, "error checking for %s namespace at %q", string(specs.UserNamespace), how)
+ return nil, nil, errors.Wrapf(err, "checking %s namespace", string(specs.UserNamespace))
}
logrus.Debugf("setting %q namespace to %q", string(specs.UserNamespace), how)
usernsOption.Path = how
@@ -922,7 +933,7 @@ func NamespaceOptions(c *cobra.Command) (namespaceOptions define.NamespaceOption
}
how = strings.TrimPrefix(how, "ns:")
if _, err := os.Stat(how); err != nil {
- return nil, define.NetworkDefault, errors.Wrapf(err, "error checking for %s namespace", what)
+ return nil, define.NetworkDefault, errors.Wrapf(err, "checking %s namespace", what)
}
policy = define.NetworkEnabled
logrus.Debugf("setting %q namespace to %q", what, how)
@@ -1043,3 +1054,37 @@ func GetTempDir() string {
}
return "/var/tmp"
}
+
+// Secrets parses the --secret flag
+func Secrets(secrets []string) (map[string]string, error) {
+ parsed := make(map[string]string)
+ invalidSyntax := errors.Errorf("incorrect secret flag format: should be --secret id=foo,src=bar")
+ for _, secret := range secrets {
+ split := strings.Split(secret, ",")
+ if len(split) > 2 {
+ return nil, invalidSyntax
+ }
+ if len(split) == 2 {
+ id := strings.Split(split[0], "=")
+ src := strings.Split(split[1], "=")
+ if len(split) == 2 && strings.ToLower(id[0]) == "id" && strings.ToLower(src[0]) == "src" {
+ fullPath, err := filepath.Abs(src[1])
+ if err != nil {
+ return nil, err
+ }
+ _, err = os.Stat(fullPath)
+ if err == nil {
+ parsed[id[1]] = fullPath
+ }
+ if err != nil {
+ return nil, errors.Wrap(err, "could not parse secrets")
+ }
+ } else {
+ return nil, invalidSyntax
+ }
+ } else {
+ return nil, invalidSyntax
+ }
+ }
+ return parsed, nil
+}
diff --git a/vendor/github.com/containers/buildah/pull.go b/vendor/github.com/containers/buildah/pull.go
index 04eac5821..7149ac986 100644
--- a/vendor/github.com/containers/buildah/pull.go
+++ b/vendor/github.com/containers/buildah/pull.go
@@ -3,28 +3,16 @@ package buildah
import (
"context"
"io"
- "strings"
"time"
"github.com/containers/buildah/define"
"github.com/containers/buildah/pkg/blobcache"
- "github.com/containers/image/v5/directory"
- "github.com/containers/image/v5/docker"
- dockerarchive "github.com/containers/image/v5/docker/archive"
- "github.com/containers/image/v5/docker/reference"
- tarfile "github.com/containers/image/v5/docker/tarfile"
- ociarchive "github.com/containers/image/v5/oci/archive"
- oci "github.com/containers/image/v5/oci/layout"
- "github.com/containers/image/v5/signature"
- is "github.com/containers/image/v5/storage"
- "github.com/containers/image/v5/transports"
- "github.com/containers/image/v5/transports/alltransports"
+ "github.com/containers/common/libimage"
+ "github.com/containers/common/pkg/config"
"github.com/containers/image/v5/types"
encconfig "github.com/containers/ocicrypt/config"
"github.com/containers/storage"
- multierror "github.com/hashicorp/go-multierror"
"github.com/pkg/errors"
- "github.com/sirupsen/logrus"
)
// PullOptions can be used to alter how an image is copied in from somewhere.
@@ -65,258 +53,44 @@ type PullOptions struct {
PullPolicy define.PullPolicy
}
-func localImageNameForReference(ctx context.Context, store storage.Store, srcRef types.ImageReference) (string, error) {
- if srcRef == nil {
- return "", errors.Errorf("reference to image is empty")
- }
- var name string
- switch srcRef.Transport().Name() {
- case dockerarchive.Transport.Name():
- file := srcRef.StringWithinTransport()
- tarSource, err := tarfile.NewSourceFromFile(file)
- if err != nil {
- return "", errors.Wrapf(err, "error opening tarfile %q as a source image", file)
- }
- defer tarSource.Close()
- manifest, err := tarSource.LoadTarManifest()
- if err != nil {
- return "", errors.Errorf("error retrieving manifest.json from tarfile %q: %v", file, err)
- }
- // to pull the first image stored in the tar file
- if len(manifest) == 0 {
- // use the hex of the digest if no manifest is found
- name, err = getImageDigest(ctx, srcRef, nil)
- if err != nil {
- return "", err
- }
- } else {
- if len(manifest[0].RepoTags) > 0 {
- name = manifest[0].RepoTags[0]
- } else {
- // If the input image has no repotags, we need to feed it a dest anyways
- name, err = getImageDigest(ctx, srcRef, nil)
- if err != nil {
- return "", err
- }
- }
- }
- case ociarchive.Transport.Name():
- // retrieve the manifest from index.json to access the image name
- manifest, err := ociarchive.LoadManifestDescriptor(srcRef)
- if err != nil {
- return "", errors.Wrapf(err, "error loading manifest for %q", transports.ImageName(srcRef))
- }
- // if index.json has no reference name, compute the image digest instead
- if manifest.Annotations == nil || manifest.Annotations["org.opencontainers.image.ref.name"] == "" {
- name, err = getImageDigest(ctx, srcRef, nil)
- if err != nil {
- return "", err
- }
- } else {
- name = manifest.Annotations["org.opencontainers.image.ref.name"]
- }
- case directory.Transport.Name():
- // supports pull from a directory
- name = toLocalImageName(srcRef.StringWithinTransport())
- case oci.Transport.Name():
- // supports pull from a directory
- split := strings.SplitN(srcRef.StringWithinTransport(), ":", 2)
- name = toLocalImageName(split[0])
- default:
- ref := srcRef.DockerReference()
- if ref == nil {
- name = srcRef.StringWithinTransport()
- _, err := is.Transport.ParseStoreReference(store, name)
- if err == nil {
- return name, nil
- }
- logrus.Debugf("error parsing local storage reference %q: %v", name, err)
- if strings.LastIndex(name, "/") != -1 {
- name = name[strings.LastIndex(name, "/")+1:]
- _, err = is.Transport.ParseStoreReference(store, name)
- if err == nil {
- return name, errors.Wrapf(err, "error parsing local storage reference %q", name)
- }
- }
- return "", errors.Errorf("reference to image %q is not a named reference", transports.ImageName(srcRef))
- }
-
- if named, ok := ref.(reference.Named); ok {
- name = named.Name()
- if namedTagged, ok := ref.(reference.NamedTagged); ok {
- name = name + ":" + namedTagged.Tag()
- }
- if canonical, ok := ref.(reference.Canonical); ok {
- name = name + "@" + canonical.Digest().String()
- }
- }
- }
-
- if _, err := is.Transport.ParseStoreReference(store, name); err != nil {
- return "", errors.Wrapf(err, "error parsing computed local image name %q", name)
- }
- return name, nil
-}
-
// Pull copies the contents of the image from somewhere else to local storage. Returns the
// ID of the local image or an error.
func Pull(ctx context.Context, imageName string, options PullOptions) (imageID string, err error) {
- systemContext := getSystemContext(options.Store, options.SystemContext, options.SignaturePolicyPath)
-
- boptions := BuilderOptions{
- FromImage: imageName,
- SignaturePolicyPath: options.SignaturePolicyPath,
- SystemContext: systemContext,
- BlobDirectory: options.BlobDirectory,
- ReportWriter: options.ReportWriter,
- MaxPullRetries: options.MaxRetries,
- PullRetryDelay: options.RetryDelay,
- OciDecryptConfig: options.OciDecryptConfig,
- PullPolicy: options.PullPolicy,
- }
-
- if !options.AllTags {
- _, _, img, err := resolveImage(ctx, systemContext, options.Store, boptions)
- if err != nil {
- return "", err
- }
- return img.ID, nil
- }
-
- srcRef, err := alltransports.ParseImageName(imageName)
- if err == nil && srcRef.Transport().Name() != docker.Transport.Name() {
- return "", errors.New("Non-docker transport is not supported, for --all-tags pulling")
- }
-
- storageRef, _, _, err := resolveImage(ctx, systemContext, options.Store, boptions)
- if err != nil {
- return "", err
- }
-
- var errs *multierror.Error
- repo := reference.TrimNamed(storageRef.DockerReference())
- dockerRef, err := docker.NewReference(reference.TagNameOnly(storageRef.DockerReference()))
- if err != nil {
- return "", errors.Wrapf(err, "internal error creating docker.Transport reference for %s", storageRef.DockerReference().String())
- }
- tags, err := docker.GetRepositoryTags(ctx, systemContext, dockerRef)
- if err != nil {
- return "", errors.Wrapf(err, "error getting repository tags")
- }
- for _, tag := range tags {
- tagged, err := reference.WithTag(repo, tag)
- if err != nil {
- errs = multierror.Append(errs, err)
- continue
- }
- taggedRef, err := docker.NewReference(tagged)
- if err != nil {
- return "", errors.Wrapf(err, "internal error creating docker.Transport reference for %s", tagged.String())
- }
- if options.ReportWriter != nil {
- if _, err := options.ReportWriter.Write([]byte("Pulling " + tagged.String() + "\n")); err != nil {
- return "", errors.Wrapf(err, "error writing pull report")
- }
- }
- ref, err := pullImage(ctx, options.Store, taggedRef, options, systemContext)
- if err != nil {
- errs = multierror.Append(errs, err)
- continue
- }
- taggedImg, err := is.Transport.GetStoreImage(options.Store, ref)
- if err != nil {
- errs = multierror.Append(errs, err)
- continue
- }
- imageID = taggedImg.ID
- }
-
- return imageID, errs.ErrorOrNil()
-}
+ libimageOptions := &libimage.PullOptions{}
+ libimageOptions.SignaturePolicyPath = options.SignaturePolicyPath
+ libimageOptions.Writer = options.ReportWriter
+ libimageOptions.RemoveSignatures = options.RemoveSignatures
+ libimageOptions.OciDecryptConfig = options.OciDecryptConfig
+ libimageOptions.AllTags = options.AllTags
+ libimageOptions.RetryDelay = &options.RetryDelay
-func pullImage(ctx context.Context, store storage.Store, srcRef types.ImageReference, options PullOptions, sc *types.SystemContext) (types.ImageReference, error) {
- blocked, err := isReferenceBlocked(srcRef, sc)
- if err != nil {
- return nil, errors.Wrapf(err, "error checking if pulling from registry for %q is blocked", transports.ImageName(srcRef))
- }
- if blocked {
- return nil, errors.Errorf("pull access to registry for %q is blocked by configuration", transports.ImageName(srcRef))
- }
- insecure, err := checkRegistrySourcesAllows("pull from", srcRef)
- if err != nil {
- return nil, err
- }
- if insecure {
- if sc.DockerInsecureSkipTLSVerify == types.OptionalBoolFalse {
- return nil, errors.Errorf("can't require tls verification on an insecured registry")
- }
- sc.DockerInsecureSkipTLSVerify = types.OptionalBoolTrue
- sc.OCIInsecureSkipTLSVerify = true
- sc.DockerDaemonInsecureSkipTLSVerify = true
- }
-
- destName, err := localImageNameForReference(ctx, store, srcRef)
- if err != nil {
- return nil, errors.Wrapf(err, "error computing local image name for %q", transports.ImageName(srcRef))
- }
- if destName == "" {
- return nil, errors.Errorf("error computing local image name for %q", transports.ImageName(srcRef))
+ if options.MaxRetries > 0 {
+ retries := uint(options.MaxRetries)
+ libimageOptions.MaxRetries = &retries
}
- destRef, err := is.Transport.ParseStoreReference(store, destName)
- if err != nil {
- return nil, errors.Wrapf(err, "error parsing image name %q", destName)
- }
- var maybeCachedDestRef = types.ImageReference(destRef)
if options.BlobDirectory != "" {
- cachedRef, err := blobcache.NewBlobCache(destRef, options.BlobDirectory, types.PreserveOriginal)
- if err != nil {
- return nil, errors.Wrapf(err, "error wrapping image reference %q in blob cache at %q", transports.ImageName(destRef), options.BlobDirectory)
- }
- maybeCachedDestRef = cachedRef
+ libimageOptions.DestinationLookupReferenceFunc = blobcache.CacheLookupReferenceFunc(options.BlobDirectory, types.PreserveOriginal)
}
- policy, err := signature.DefaultPolicy(sc)
+ pullPolicy, err := config.ParsePullPolicy(options.PullPolicy.String())
if err != nil {
- return nil, errors.Wrapf(err, "error obtaining default signature policy")
+ return "", err
}
- policyContext, err := signature.NewPolicyContext(policy)
+ runtime, err := libimage.RuntimeFromStore(options.Store, &libimage.RuntimeOptions{SystemContext: options.SystemContext})
if err != nil {
- return nil, errors.Wrapf(err, "error creating new signature policy context")
- }
-
- defer func() {
- if err2 := policyContext.Destroy(); err2 != nil {
- logrus.Debugf("error destroying signature policy context: %v", err2)
- }
- }()
-
- logrus.Debugf("copying %q to %q", transports.ImageName(srcRef), destName)
- if _, err := retryCopyImage(ctx, policyContext, maybeCachedDestRef, srcRef, srcRef, getCopyOptions(store, options.ReportWriter, sc, sc, "", options.RemoveSignatures, "", nil, nil, options.OciDecryptConfig), options.MaxRetries, options.RetryDelay); err != nil {
- logrus.Debugf("error copying src image [%q] to dest image [%q] err: %v", transports.ImageName(srcRef), destName, err)
- return nil, err
+ return "", err
}
- return destRef, nil
-}
-// getImageDigest creates an image object and uses the hex value of the digest as the image ID
-// for parsing the store reference
-func getImageDigest(ctx context.Context, src types.ImageReference, sc *types.SystemContext) (string, error) {
- newImg, err := src.NewImage(ctx, sc)
+ pulledImages, err := runtime.Pull(context.Background(), imageName, pullPolicy, libimageOptions)
if err != nil {
- return "", errors.Wrapf(err, "error opening image %q for reading", transports.ImageName(src))
+ return "", err
}
- defer newImg.Close()
- digest := newImg.ConfigInfo().Digest
- if err = digest.Validate(); err != nil {
- return "", errors.Wrapf(err, "error getting config info from image %q", transports.ImageName(src))
+ if len(pulledImages) == 0 {
+ return "", errors.Errorf("internal error pulling %s: no image pulled and no error", imageName)
}
- return "@" + digest.Hex(), nil
-}
-// toLocalImageName converts an image name into a 'localhost/' prefixed one
-func toLocalImageName(imageName string) string {
- return "localhost/" + strings.TrimLeft(imageName, "/")
+ return pulledImages[0].ID(), nil
}
diff --git a/vendor/github.com/containers/buildah/push.go b/vendor/github.com/containers/buildah/push.go
new file mode 100644
index 000000000..692dfccd4
--- /dev/null
+++ b/vendor/github.com/containers/buildah/push.go
@@ -0,0 +1,126 @@
+package buildah
+
+import (
+ "context"
+ "fmt"
+ "io"
+ "time"
+
+ "github.com/containers/buildah/pkg/blobcache"
+ "github.com/containers/common/libimage"
+ "github.com/containers/image/v5/docker/reference"
+ "github.com/containers/image/v5/manifest"
+ "github.com/containers/image/v5/transports"
+ "github.com/containers/image/v5/types"
+ encconfig "github.com/containers/ocicrypt/config"
+ "github.com/containers/storage"
+ "github.com/containers/storage/pkg/archive"
+ digest "github.com/opencontainers/go-digest"
+ "github.com/pkg/errors"
+ "github.com/sirupsen/logrus"
+)
+
+// PushOptions can be used to alter how an image is copied somewhere.
+type PushOptions struct {
+ // Compression specifies the type of compression which is applied to
+ // layer blobs. The default is to not use compression, but
+ // archive.Gzip is recommended.
+ Compression archive.Compression
+ // SignaturePolicyPath specifies an override location for the signature
+ // policy which should be used for verifying the new image as it is
+ // being written. Except in specific circumstances, no value should be
+ // specified, indicating that the shared, system-wide default policy
+ // should be used.
+ SignaturePolicyPath string
+ // ReportWriter is an io.Writer which will be used to log the writing
+ // of the new image.
+ ReportWriter io.Writer
+ // Store is the local storage store which holds the source image.
+ Store storage.Store
+ // github.com/containers/image/types SystemContext to hold credentials
+ // and other authentication/authorization information.
+ SystemContext *types.SystemContext
+ // ManifestType is the format to use when saving the image using the 'dir' transport
+ // possible options are oci, v2s1, and v2s2
+ ManifestType string
+ // BlobDirectory is the name of a directory in which we'll look for
+ // prebuilt copies of layer blobs that we might otherwise need to
+ // regenerate from on-disk layers, substituting them in the list of
+ // blobs to copy whenever possible.
+ BlobDirectory string
+ // Quiet is a boolean value that determines if minimal output to
+ // the user will be displayed, this is best used for logging.
+ // The default is false.
+ Quiet bool
+ // SignBy is the fingerprint of a GPG key to use for signing the image.
+ SignBy string
+ // RemoveSignatures causes any existing signatures for the image to be
+ // discarded for the pushed copy.
+ RemoveSignatures bool
+ // MaxRetries is the maximum number of attempts we'll make to push any
+ // one image to the external registry if the first attempt fails.
+ MaxRetries int
+ // RetryDelay is how long to wait before retrying a push attempt.
+ RetryDelay time.Duration
+ // OciEncryptConfig when non-nil indicates that an image should be encrypted.
+ // The encryption options is derived from the construction of EncryptConfig object.
+ OciEncryptConfig *encconfig.EncryptConfig
+ // OciEncryptLayers represents the list of layers to encrypt.
+ // If nil, don't encrypt any layers.
+ // If non-nil and len==0, denotes encrypt all layers.
+ // integers in the slice represent 0-indexed layer indices, with support for negative
+ // indexing. i.e. 0 is the first layer, -1 is the last (top-most) layer.
+ OciEncryptLayers *[]int
+}
+
+// Push copies the contents of the image to a new location.
+func Push(ctx context.Context, image string, dest types.ImageReference, options PushOptions) (reference.Canonical, digest.Digest, error) {
+ libimageOptions := &libimage.PushOptions{}
+ libimageOptions.SignaturePolicyPath = options.SignaturePolicyPath
+ libimageOptions.Writer = options.ReportWriter
+ libimageOptions.ManifestMIMEType = options.ManifestType
+ libimageOptions.SignBy = options.SignBy
+ libimageOptions.RemoveSignatures = options.RemoveSignatures
+ libimageOptions.RetryDelay = &options.RetryDelay
+ libimageOptions.OciEncryptConfig = options.OciEncryptConfig
+ libimageOptions.OciEncryptLayers = options.OciEncryptLayers
+ libimageOptions.PolicyAllowStorage = true
+
+ if options.Quiet {
+ libimageOptions.Writer = nil
+ }
+
+ if options.BlobDirectory != "" {
+ compress := types.PreserveOriginal
+ if options.Compression == archive.Gzip {
+ compress = types.Compress
+ }
+ libimageOptions.SourceLookupReferenceFunc = blobcache.CacheLookupReferenceFunc(options.BlobDirectory, compress)
+ }
+
+ runtime, err := libimage.RuntimeFromStore(options.Store, &libimage.RuntimeOptions{SystemContext: options.SystemContext})
+ if err != nil {
+ return nil, "", err
+ }
+
+ destString := fmt.Sprintf("%s:%s", dest.Transport().Name(), dest.StringWithinTransport())
+ manifestBytes, err := runtime.Push(ctx, image, destString, libimageOptions)
+ if err != nil {
+ return nil, "", err
+ }
+
+ manifestDigest, err := manifest.Digest(manifestBytes)
+ if err != nil {
+ return nil, "", errors.Wrapf(err, "error computing digest of manifest of new image %q", transports.ImageName(dest))
+ }
+
+ var ref reference.Canonical
+ if name := dest.DockerReference(); name != nil {
+ ref, err = reference.WithDigest(name, manifestDigest)
+ if err != nil {
+ logrus.Warnf("error generating canonical reference with name %q and digest %s: %v", name, manifestDigest.String(), err)
+ }
+ }
+
+ return ref, manifestDigest, nil
+}
diff --git a/vendor/github.com/containers/buildah/run.go b/vendor/github.com/containers/buildah/run.go
index 876850403..efffd1f5f 100644
--- a/vendor/github.com/containers/buildah/run.go
+++ b/vendor/github.com/containers/buildah/run.go
@@ -134,4 +134,9 @@ type RunOptions struct {
DropCapabilities []string
// Devices are the additional devices to add to the containers
Devices define.ContainerDevices
+ // Secrets are the available secrets to use in a RUN
+ Secrets map[string]string
+ // RunMounts are mounts for this run. RunMounts for this run
+ // will not show up in subsequent runs.
+ RunMounts []string
}
diff --git a/vendor/github.com/containers/buildah/run_linux.go b/vendor/github.com/containers/buildah/run_linux.go
index 6356d2602..005607792 100644
--- a/vendor/github.com/containers/buildah/run_linux.go
+++ b/vendor/github.com/containers/buildah/run_linux.go
@@ -246,10 +246,17 @@ rootless=%d
bindFiles["/run/.containerenv"] = containerenvPath
}
- err = b.setupMounts(mountPoint, spec, path, options.Mounts, bindFiles, volumes, b.CommonBuildOpts.Volumes, b.CommonBuildOpts.ShmSize, namespaceOptions)
+ runMountTargets, err := b.setupMounts(mountPoint, spec, path, options.Mounts, bindFiles, volumes, b.CommonBuildOpts.Volumes, b.CommonBuildOpts.ShmSize, namespaceOptions, options.Secrets, options.RunMounts)
if err != nil {
return errors.Wrapf(err, "error resolving mountpoints for container %q", b.ContainerID)
}
+
+ defer func() {
+ if err := cleanupRunMounts(runMountTargets, mountPoint); err != nil {
+ logrus.Errorf("unable to cleanup run mounts %v", err)
+ }
+ }()
+
defer b.cleanupTempVolumes()
if options.CNIConfigDir == "" {
@@ -341,16 +348,16 @@ func runSetupBuiltinVolumes(mountLabel, mountPoint, containerDir string, builtin
// Add temporary copies of the contents of volume locations at the
// volume locations, unless we already have something there.
for _, volume := range builtinVolumes {
- subdir := digest.Canonical.FromString(volume).Hex()
- volumePath := filepath.Join(containerDir, "buildah-volumes", subdir)
- srcPath := filepath.Join(mountPoint, volume)
+ volumePath := filepath.Join(containerDir, "buildah-volumes", digest.Canonical.FromString(volume).Hex())
initializeVolume := false
- // If we need to, initialize the volume path's initial contents.
+ // If we need to, create the directory that we'll use to hold
+ // the volume contents. If we do need to create it, then we'll
+ // need to populate it, too, so make a note of that.
if _, err := os.Stat(volumePath); err != nil {
if !os.IsNotExist(err) {
return nil, err
}
- logrus.Debugf("setting up built-in volume at %q", volumePath)
+ logrus.Debugf("setting up built-in volume path at %q for %q", volumePath, volume)
if err = os.MkdirAll(volumePath, 0755); err != nil {
return nil, err
}
@@ -359,28 +366,25 @@ func runSetupBuiltinVolumes(mountLabel, mountPoint, containerDir string, builtin
}
initializeVolume = true
}
- // Check if srcPath is a symlink
- stat, err := os.Lstat(srcPath)
- // If srcPath is a symlink, follow the link and ensure the destination exists
- if err == nil && stat != nil && (stat.Mode()&os.ModeSymlink != 0) {
- srcPath, err = copier.Eval(mountPoint, volume, copier.EvalOptions{})
- if err != nil {
- return nil, errors.Wrapf(err, "evaluating symlink %q", srcPath)
- }
- // Stat the destination of the evaluated symlink
- stat, err = os.Stat(srcPath)
+ // Make sure the volume exists in the rootfs and read its attributes.
+ createDirPerms := os.FileMode(0755)
+ err := copier.Mkdir(mountPoint, filepath.Join(mountPoint, volume), copier.MkdirOptions{
+ ChownNew: &hostOwner,
+ ChmodNew: &createDirPerms,
+ })
+ if err != nil {
+ return nil, errors.Wrapf(err, "ensuring volume path %q", filepath.Join(mountPoint, volume))
}
+ srcPath, err := copier.Eval(mountPoint, filepath.Join(mountPoint, volume), copier.EvalOptions{})
if err != nil {
- if !os.IsNotExist(err) {
- return nil, err
- }
- if err = idtools.MkdirAllAndChownNew(srcPath, 0755, hostOwner); err != nil {
- return nil, err
- }
- if stat, err = os.Stat(srcPath); err != nil {
- return nil, err
- }
+ return nil, errors.Wrapf(err, "evaluating path %q", srcPath)
+ }
+ stat, err := os.Stat(srcPath)
+ if err != nil && !os.IsNotExist(err) {
+ return nil, err
}
+ // If we need to populate the mounted volume's contents with
+ // content from the rootfs, set it up now.
if initializeVolume {
if err = os.Chmod(volumePath, stat.Mode().Perm()); err != nil {
return nil, err
@@ -388,6 +392,7 @@ func runSetupBuiltinVolumes(mountLabel, mountPoint, containerDir string, builtin
if err = os.Chown(volumePath, int(stat.Sys().(*syscall.Stat_t).Uid), int(stat.Sys().(*syscall.Stat_t).Gid)); err != nil {
return nil, err
}
+ logrus.Debugf("populating directory %q for volume %q using contents of %q", volumePath, volume, srcPath)
if err = extractWithTar(mountPoint, srcPath, volumePath); err != nil && !os.IsNotExist(errors.Cause(err)) {
return nil, errors.Wrapf(err, "error populating directory %q for volume %q using contents of %q", volumePath, volume, srcPath)
}
@@ -403,7 +408,7 @@ func runSetupBuiltinVolumes(mountLabel, mountPoint, containerDir string, builtin
return mounts, nil
}
-func (b *Builder) setupMounts(mountPoint string, spec *specs.Spec, bundlePath string, optionMounts []specs.Mount, bindFiles map[string]string, builtinVolumes, volumeMounts []string, shmSize string, namespaceOptions define.NamespaceOptions) error {
+func (b *Builder) setupMounts(mountPoint string, spec *specs.Spec, bundlePath string, optionMounts []specs.Mount, bindFiles map[string]string, builtinVolumes, volumeMounts []string, shmSize string, namespaceOptions define.NamespaceOptions, secrets map[string]string, runFileMounts []string) (runMountTargets []string, err error) {
// Start building a new list of mounts.
var mounts []specs.Mount
haveMount := func(destination string) bool {
@@ -497,39 +502,45 @@ func (b *Builder) setupMounts(mountPoint string, spec *specs.Spec, bundlePath st
// After this point we need to know the per-container persistent storage directory.
cdir, err := b.store.ContainerDirectory(b.ContainerID)
if err != nil {
- return errors.Wrapf(err, "error determining work directory for container %q", b.ContainerID)
+ return nil, errors.Wrapf(err, "error determining work directory for container %q", b.ContainerID)
}
// Figure out which UID and GID to tell the subscriptions package to use
// for files that it creates.
rootUID, rootGID, err := util.GetHostRootIDs(spec)
if err != nil {
- return err
+ return nil, err
}
// Get the list of subscriptions mounts.
- secretMounts := subscriptions.MountsWithUIDGID(b.MountLabel, cdir, b.DefaultMountsFilePath, mountPoint, int(rootUID), int(rootGID), unshare.IsRootless(), false)
+ subscriptionMounts := subscriptions.MountsWithUIDGID(b.MountLabel, cdir, b.DefaultMountsFilePath, mountPoint, int(rootUID), int(rootGID), unshare.IsRootless(), false)
+
+ // Get the list of mounts that are just for this Run() call.
+ runMounts, runTargets, err := runSetupRunMounts(runFileMounts, secrets, b.MountLabel, cdir, spec.Linux.UIDMappings, spec.Linux.GIDMappings)
+ if err != nil {
+ return nil, err
+ }
// Add temporary copies of the contents of volume locations at the
// volume locations, unless we already have something there.
builtins, err := runSetupBuiltinVolumes(b.MountLabel, mountPoint, cdir, builtinVolumes, int(rootUID), int(rootGID))
if err != nil {
- return err
+ return nil, err
}
// Get host UID and GID of the container process.
processUID, processGID, err := util.GetHostIDs(spec.Linux.UIDMappings, spec.Linux.GIDMappings, spec.Process.User.UID, spec.Process.User.GID)
if err != nil {
- return err
+ return nil, err
}
// Get the list of explicitly-specified volume mounts.
volumes, err := b.runSetupVolumeMounts(spec.Linux.MountLabel, volumeMounts, optionMounts, int(rootUID), int(rootGID), int(processUID), int(processGID))
if err != nil {
- return err
+ return nil, err
}
- allMounts := util.SortMounts(append(append(append(append(append(volumes, builtins...), secretMounts...), bindFileMounts...), specMounts...), sysfsMount...))
+ allMounts := util.SortMounts(append(append(append(append(append(append(volumes, builtins...), runMounts...), subscriptionMounts...), bindFileMounts...), specMounts...), sysfsMount...))
// Add them all, in the preferred order, except where they conflict with something that was previously added.
for _, mount := range allMounts {
if haveMount(mount.Destination) {
@@ -542,7 +553,7 @@ func (b *Builder) setupMounts(mountPoint string, spec *specs.Spec, bundlePath st
// Set the list in the spec.
spec.Mounts = mounts
- return nil
+ return runTargets, nil
}
// addNetworkConfig copies files from host and sets them up to bind mount into container
@@ -818,7 +829,7 @@ func runUsingRuntime(isolation define.Isolation, options RunOptions, configureNe
logrus.Debugf("Running %q", create.Args)
err = create.Run()
if err != nil {
- return 1, errors.Wrapf(err, "error creating container for %v: %s", pargs, runCollectOutput(errorFds, closeBeforeReadingErrorFds))
+ return 1, errors.Wrapf(err, "error from %s creating container for %v: %s", runtime, pargs, runCollectOutput(errorFds, closeBeforeReadingErrorFds))
}
defer func() {
err2 := del.Run()
@@ -826,7 +837,7 @@ func runUsingRuntime(isolation define.Isolation, options RunOptions, configureNe
if err == nil {
err = errors.Wrapf(err2, "error deleting container")
} else {
- logrus.Infof("error deleting container: %v", err2)
+ logrus.Infof("error from %s deleting container: %v", runtime, err2)
}
}
}()
@@ -879,13 +890,13 @@ func runUsingRuntime(isolation define.Isolation, options RunOptions, configureNe
logrus.Debugf("Running %q", start.Args)
err = start.Run()
if err != nil {
- return 1, errors.Wrapf(err, "error starting container")
+ return 1, errors.Wrapf(err, "error from %s starting container", runtime)
}
stopped := false
defer func() {
if !stopped {
if err2 := kill.Run(); err2 != nil {
- logrus.Infof("error stopping container: %v", err2)
+ logrus.Infof("error from %s stopping container: %v", runtime, err2)
}
}
}()
@@ -900,10 +911,10 @@ func runUsingRuntime(isolation define.Isolation, options RunOptions, configureNe
stat.Stderr = os.Stderr
stateOutput, err := stat.Output()
if err != nil {
- return 1, errors.Wrapf(err, "error reading container state (got output: %q)", string(stateOutput))
+ return 1, errors.Wrapf(err, "error reading container state from %s (got output: %q)", runtime, string(stateOutput))
}
if err = json.Unmarshal(stateOutput, &state); err != nil {
- return 1, errors.Wrapf(err, "error parsing container state %q", string(stateOutput))
+ return 1, errors.Wrapf(err, "error parsing container state %q from %s", string(stateOutput), runtime)
}
switch state.Status {
case "running":
@@ -2248,3 +2259,149 @@ type runUsingRuntimeSubprocOptions struct {
func init() {
reexec.Register(runUsingRuntimeCommand, runUsingRuntimeMain)
}
+
+// runSetupRunMounts sets up mounts that exist only in this RUN, not in subsequent runs
+func runSetupRunMounts(mounts []string, secrets map[string]string, mountlabel string, containerWorkingDir string, uidmap []spec.LinuxIDMapping, gidmap []spec.LinuxIDMapping) ([]spec.Mount, []string, error) {
+ mountTargets := make([]string, 0, 10)
+ finalMounts := make([]specs.Mount, 0, len(mounts))
+ for _, mount := range mounts {
+ arr := strings.SplitN(mount, ",", 2)
+ if len(arr) < 2 {
+ return nil, nil, errors.New("invalid mount syntax")
+ }
+
+ kv := strings.Split(arr[0], "=")
+ if len(kv) != 2 || kv[0] != "type" {
+ return nil, nil, errors.New("invalid mount type")
+ }
+
+ tokens := strings.Split(arr[1], ",")
+ // For now, we only support type secret.
+ switch kv[1] {
+ case "secret":
+ mount, err := getSecretMount(tokens, secrets, mountlabel, containerWorkingDir, uidmap, gidmap)
+ if err != nil {
+ return nil, nil, err
+ }
+ if mount != nil {
+ finalMounts = append(finalMounts, *mount)
+ mountTargets = append(mountTargets, mount.Destination)
+
+ }
+ default:
+ return nil, nil, errors.Errorf("invalid filesystem type %q", kv[1])
+ }
+ }
+ return finalMounts, mountTargets, nil
+}
+
+func getSecretMount(tokens []string, secrets map[string]string, mountlabel string, containerWorkingDir string, uidmap []spec.LinuxIDMapping, gidmap []spec.LinuxIDMapping) (*spec.Mount, error) {
+ errInvalidSyntax := errors.New("secret should have syntax id=id[,target=path,required=bool,mode=uint,uid=uint,gid=uint]")
+
+ var err error
+ var id, target string
+ var required bool
+ var uid, gid uint32
+ var mode uint32 = 0400
+ for _, val := range tokens {
+ kv := strings.SplitN(val, "=", 2)
+ switch kv[0] {
+ case "id":
+ id = kv[1]
+ case "target":
+ target = kv[1]
+ case "required":
+ required, err = strconv.ParseBool(kv[1])
+ if err != nil {
+ return nil, errInvalidSyntax
+ }
+ case "mode":
+ mode64, err := strconv.ParseUint(kv[1], 8, 32)
+ if err != nil {
+ return nil, errInvalidSyntax
+ }
+ mode = uint32(mode64)
+ case "uid":
+ uid64, err := strconv.ParseUint(kv[1], 10, 32)
+ if err != nil {
+ return nil, errInvalidSyntax
+ }
+ uid = uint32(uid64)
+ case "gid":
+ gid64, err := strconv.ParseUint(kv[1], 10, 32)
+ if err != nil {
+ return nil, errInvalidSyntax
+ }
+ gid = uint32(gid64)
+ default:
+ return nil, errInvalidSyntax
+ }
+ }
+
+ if id == "" {
+ return nil, errInvalidSyntax
+ }
+ // Default location for secrets is /run/secrets/id
+ if target == "" {
+ target = "/run/secrets/" + id
+ }
+
+ src, ok := secrets[id]
+ if !ok {
+ if required {
+ return nil, errors.Errorf("secret required but no secret with id %s found", id)
+ }
+ return nil, nil
+ }
+
+ // Copy secrets to container working dir, since we need to chmod, chown and relabel it
+ // for the container user and we don't want to mess with the original file
+ ctrFileOnHost := filepath.Join(containerWorkingDir, "secrets", id)
+ _, err = os.Stat(ctrFileOnHost)
+ if os.IsNotExist(err) {
+ data, err := ioutil.ReadFile(src)
+ if err != nil {
+ return nil, err
+ }
+ if err := os.MkdirAll(filepath.Dir(ctrFileOnHost), 0755); err != nil {
+ return nil, err
+ }
+ if err := ioutil.WriteFile(ctrFileOnHost, data, 0644); err != nil {
+ return nil, err
+ }
+ }
+
+ if err := label.Relabel(ctrFileOnHost, mountlabel, false); err != nil {
+ return nil, err
+ }
+ hostUID, hostGID, err := util.GetHostIDs(uidmap, gidmap, uid, gid)
+ if err != nil {
+ return nil, err
+ }
+ if err := os.Lchown(ctrFileOnHost, int(hostUID), int(hostGID)); err != nil {
+ return nil, err
+ }
+ if err := os.Chmod(ctrFileOnHost, os.FileMode(mode)); err != nil {
+ return nil, err
+ }
+ newMount := specs.Mount{
+ Destination: target,
+ Type: "bind",
+ Source: ctrFileOnHost,
+ Options: []string{"bind", "rprivate", "ro"},
+ }
+ return &newMount, nil
+}
+
+func cleanupRunMounts(paths []string, mountpoint string) error {
+ opts := copier.RemoveOptions{
+ All: true,
+ }
+ for _, path := range paths {
+ err := copier.Remove(mountpoint, path, opts)
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
diff --git a/vendor/github.com/containers/buildah/util/util.go b/vendor/github.com/containers/buildah/util/util.go
index b3fae6003..3b22a3943 100644
--- a/vendor/github.com/containers/buildah/util/util.go
+++ b/vendor/github.com/containers/buildah/util/util.go
@@ -5,7 +5,6 @@ import (
"io"
"net/url"
"os"
- "path"
"path/filepath"
"sort"
"strings"
@@ -13,12 +12,12 @@ import (
"syscall"
"github.com/containers/buildah/define"
+ "github.com/containers/common/libimage"
"github.com/containers/common/pkg/config"
"github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/pkg/shortnames"
"github.com/containers/image/v5/pkg/sysregistriesv2"
"github.com/containers/image/v5/signature"
- is "github.com/containers/image/v5/storage"
"github.com/containers/image/v5/transports/alltransports"
"github.com/containers/image/v5/types"
"github.com/containers/storage"
@@ -46,7 +45,7 @@ var (
}
)
-// ResolveName checks if name is a valid image name, and if that name doesn't
+// resolveName checks if name is a valid image name, and if that name doesn't
// include a domain portion, returns a list of the names which it might
// correspond to in the set of configured registries, the transport used to
// pull the image, and a boolean which is true iff
@@ -59,7 +58,7 @@ var (
//
// NOTE: The "list of search registries is empty" check does not count blocked registries,
// and neither the implied "localhost" nor a possible firstRegistry are counted
-func ResolveName(name string, firstRegistry string, sc *types.SystemContext, store storage.Store) ([]string, string, bool, error) {
+func resolveName(name string, sc *types.SystemContext, store storage.Store) ([]string, string, bool, error) {
if name == "" {
return nil, "", false, nil
}
@@ -112,16 +111,6 @@ func ResolveName(name string, firstRegistry string, sc *types.SystemContext, sto
searchRegistriesAreEmpty := len(registries) == 0
var candidates []string
- // Set the first registry if requested.
- if firstRegistry != "" && firstRegistry != "localhost" {
- middle := ""
- if prefix, ok := RegistryDefaultPathPrefix[firstRegistry]; ok && !strings.ContainsRune(name, '/') {
- middle = prefix
- }
- candidate := path.Join(firstRegistry, middle, name)
- candidates = append(candidates, candidate)
- }
-
// Local short-name resolution.
namedCandidates, err := shortnames.ResolveLocally(sc, name)
if err != nil {
@@ -144,11 +133,11 @@ func StartsWithValidTransport(name string) bool {
// the fully expanded result, including a tag. Names which don't include a registry
// name will be marked for the most-preferred registry (i.e., the first one in our
// configuration).
-func ExpandNames(names []string, firstRegistry string, systemContext *types.SystemContext, store storage.Store) ([]string, error) {
+func ExpandNames(names []string, systemContext *types.SystemContext, store storage.Store) ([]string, error) {
expanded := make([]string, 0, len(names))
for _, n := range names {
var name reference.Named
- nameList, _, _, err := ResolveName(n, firstRegistry, systemContext, store)
+ nameList, _, _, err := resolveName(n, systemContext, store)
if err != nil {
return nil, errors.Wrapf(err, "error parsing name %q", n)
}
@@ -172,45 +161,34 @@ func ExpandNames(names []string, firstRegistry string, systemContext *types.Syst
}
// FindImage locates the locally-stored image which corresponds to a given name.
+// Please note that the `firstRegistry` argument has been deprecated and has no
+// effect anymore.
func FindImage(store storage.Store, firstRegistry string, systemContext *types.SystemContext, image string) (types.ImageReference, *storage.Image, error) {
- var ref types.ImageReference
- var img *storage.Image
- var err error
- names, _, _, err := ResolveName(image, firstRegistry, systemContext, store)
+ runtime, err := libimage.RuntimeFromStore(store, &libimage.RuntimeOptions{SystemContext: systemContext})
if err != nil {
- return nil, nil, errors.Wrapf(err, "error parsing name %q", image)
+ return nil, nil, err
}
- for _, name := range names {
- ref, err = is.Transport.ParseStoreReference(store, name)
- if err != nil {
- logrus.Debugf("error parsing reference to image %q: %v", name, err)
- continue
- }
- img, err = is.Transport.GetStoreImage(store, ref)
- if err != nil {
- img2, err2 := store.Image(name)
- if err2 != nil {
- logrus.Debugf("error locating image %q: %v", name, err2)
- continue
- }
- img = img2
- }
- break
+
+ localImage, _, err := runtime.LookupImage(image, &libimage.LookupImageOptions{IgnorePlatform: true})
+ if err != nil {
+ return nil, nil, err
}
- if ref == nil || img == nil {
- return nil, nil, errors.Wrapf(err, "error locating image with name %q (%v)", image, names)
+ ref, err := localImage.StorageReference()
+ if err != nil {
+ return nil, nil, err
}
- return ref, img, nil
+
+ return ref, localImage.StorageImage(), nil
}
-// ResolveNameToReferences tries to create a list of possible references
+// resolveNameToReferences tries to create a list of possible references
// (including their transports) from the provided image name.
func ResolveNameToReferences(
store storage.Store,
systemContext *types.SystemContext,
image string,
) (refs []types.ImageReference, err error) {
- names, transport, _, err := ResolveName(image, "", systemContext, store)
+ names, transport, _, err := resolveName(image, systemContext, store)
if err != nil {
return nil, errors.Wrapf(err, "error parsing name %q", image)
}
@@ -233,16 +211,26 @@ func ResolveNameToReferences(
return refs, nil
}
-// AddImageNames adds the specified names to the specified image.
+// AddImageNames adds the specified names to the specified image. Please note
+// that the `firstRegistry` argument has been deprecated and has no effect
+// anymore.
func AddImageNames(store storage.Store, firstRegistry string, systemContext *types.SystemContext, image *storage.Image, addNames []string) error {
- names, err := ExpandNames(addNames, firstRegistry, systemContext, store)
+ runtime, err := libimage.RuntimeFromStore(store, &libimage.RuntimeOptions{SystemContext: systemContext})
if err != nil {
return err
}
- err = store.SetNames(image.ID, append(image.Names, names...))
+
+ localImage, _, err := runtime.LookupImage(image.ID, nil)
if err != nil {
- return errors.Wrapf(err, "error adding names (%v) to image %q", names, image.ID)
+ return err
}
+
+ for _, tag := range addNames {
+ if err := localImage.Tag(tag); err != nil {
+ return errors.Wrapf(err, "error tagging image %s", image.ID)
+ }
+ }
+
return nil
}
@@ -275,11 +263,6 @@ func Runtime() string {
return runtime
}
- // Need to switch default until runc supports cgroups v2
- if unified, _ := IsCgroup2UnifiedMode(); unified {
- return "crun"
- }
-
conf, err := config.Default()
if err != nil {
logrus.Warnf("Error loading container config when searching for local runtime: %v", err)
diff --git a/vendor/github.com/containers/common/libimage/copier.go b/vendor/github.com/containers/common/libimage/copier.go
new file mode 100644
index 000000000..34cc0d45d
--- /dev/null
+++ b/vendor/github.com/containers/common/libimage/copier.go
@@ -0,0 +1,427 @@
+package libimage
+
+import (
+ "context"
+ "encoding/json"
+ "io"
+ "os"
+ "strings"
+ "time"
+
+ "github.com/containers/common/pkg/config"
+ "github.com/containers/common/pkg/retry"
+ "github.com/containers/image/v5/copy"
+ "github.com/containers/image/v5/docker/reference"
+ "github.com/containers/image/v5/signature"
+ storageTransport "github.com/containers/image/v5/storage"
+ "github.com/containers/image/v5/types"
+ encconfig "github.com/containers/ocicrypt/config"
+ "github.com/pkg/errors"
+ "github.com/sirupsen/logrus"
+)
+
+const (
+ defaultMaxRetries = 3
+ defaultRetryDelay = time.Second
+)
+
+// LookupReferenceFunc return an image reference based on the specified one.
+// This can be used to pass custom blob caches to the copy operation.
+type LookupReferenceFunc func(ref types.ImageReference) (types.ImageReference, error)
+
+// CopyOptions allow for customizing image-copy operations.
+type CopyOptions struct {
+ // If set, will be used for copying the image. Fields below may
+ // override certain settings.
+ SystemContext *types.SystemContext
+ // Allows for customizing the source reference lookup. This can be
+ // used to use custom blob caches.
+ SourceLookupReferenceFunc LookupReferenceFunc
+ // Allows for customizing the destination reference lookup. This can
+ // be used to use custom blob caches.
+ DestinationLookupReferenceFunc LookupReferenceFunc
+
+ // containers-auth.json(5) file to use when authenticating against
+ // container registries.
+ AuthFilePath string
+ // Custom path to a blob-info cache.
+ BlobInfoCacheDirPath string
+ // Path to the certificates directory.
+ CertDirPath string
+ // Force layer compression when copying to a `dir` transport destination.
+ DirForceCompress bool
+ // Allow contacting registries over HTTP, or HTTPS with failed TLS
+ // verification. Note that this does not affect other TLS connections.
+ InsecureSkipTLSVerify types.OptionalBool
+ // Maximum number of retries with exponential backoff when facing
+ // transient network errors. A reasonable default is used if not set.
+ // Default 3.
+ MaxRetries *uint
+ // RetryDelay used for the exponential back off of MaxRetries.
+ // Default 1 time.Second.
+ RetryDelay *time.Duration
+ // ManifestMIMEType is the desired media type the image will be
+ // converted to if needed. Note that it must contain the exact MIME
+ // types. Short forms (e.g., oci, v2s2) used by some tools are not
+ // supported.
+ ManifestMIMEType string
+ // If OciEncryptConfig is non-nil, it indicates that an image should be
+ // encrypted. The encryption options is derived from the construction
+ // of EncryptConfig object. Note: During initial encryption process of
+ // a layer, the resultant digest is not known during creation, so
+ // newDigestingReader has to be set with validateDigest = false
+ OciEncryptConfig *encconfig.EncryptConfig
+ // OciEncryptLayers represents the list of layers to encrypt. If nil,
+ // don't encrypt any layers. If non-nil and len==0, denotes encrypt
+ // all layers. integers in the slice represent 0-indexed layer
+ // indices, with support for negative indexing. i.e. 0 is the first
+ // layer, -1 is the last (top-most) layer.
+ OciEncryptLayers *[]int
+ // OciDecryptConfig contains the config that can be used to decrypt an
+ // image if it is encrypted if non-nil. If nil, it does not attempt to
+ // decrypt an image.
+ OciDecryptConfig *encconfig.DecryptConfig
+ // Reported to when ProgressInterval has arrived for a single
+ // artifact+offset.
+ Progress chan types.ProgressProperties
+ // If set, allow using the storage transport even if it's disabled by
+ // the specified SignaturePolicyPath.
+ PolicyAllowStorage bool
+ // SignaturePolicyPath to overwrite the default one.
+ SignaturePolicyPath string
+ // If non-empty, asks for a signature to be added during the copy, and
+ // specifies a key ID.
+ SignBy string
+ // Remove any pre-existing signatures. SignBy will still add a new
+ // signature.
+ RemoveSignatures bool
+ // Writer is used to display copy information including progress bars.
+ Writer io.Writer
+
+ // ----- platform -----------------------------------------------------
+
+ // Architecture to use for choosing images.
+ Architecture string
+ // OS to use for choosing images.
+ OS string
+ // Variant to use when choosing images.
+ Variant string
+
+ // ----- credentials --------------------------------------------------
+
+ // Username to use when authenticating at a container registry.
+ Username string
+ // Password to use when authenticating at a container registry.
+ Password string
+ // Credentials is an alternative way to specify credentials in format
+ // "username[:password]". Cannot be used in combination with
+ // Username/Password.
+ Credentials string
+ // IdentityToken is used to authenticate the user and get
+ // an access token for the registry.
+ IdentityToken string `json:"identitytoken,omitempty"`
+
+ // ----- internal -----------------------------------------------------
+
+ // Additional tags when creating or copying a docker-archive.
+ dockerArchiveAdditionalTags []reference.NamedTagged
+}
+
+// copier is an internal helper to conveniently copy images.
+type copier struct {
+ imageCopyOptions copy.Options
+ retryOptions retry.RetryOptions
+ systemContext *types.SystemContext
+ policyContext *signature.PolicyContext
+
+ sourceLookup LookupReferenceFunc
+ destinationLookup LookupReferenceFunc
+}
+
+var (
+ // storageAllowedPolicyScopes overrides the policy for local storage
+ // to ensure that we can read images from it.
+ storageAllowedPolicyScopes = signature.PolicyTransportScopes{
+ "": []signature.PolicyRequirement{
+ signature.NewPRInsecureAcceptAnything(),
+ },
+ }
+)
+
+// getDockerAuthConfig extracts a docker auth config from the CopyOptions. Returns
+// nil if no credentials are set.
+func (options *CopyOptions) getDockerAuthConfig() (*types.DockerAuthConfig, error) {
+ authConf := &types.DockerAuthConfig{IdentityToken: options.IdentityToken}
+
+ if options.Username != "" {
+ if options.Credentials != "" {
+ return nil, errors.New("username/password cannot be used with credentials")
+ }
+ authConf.Username = options.Username
+ authConf.Password = options.Password
+ return authConf, nil
+ }
+
+ if options.Credentials != "" {
+ split := strings.SplitN(options.Credentials, ":", 2)
+ switch len(split) {
+ case 1:
+ authConf.Username = split[0]
+ default:
+ authConf.Username = split[0]
+ authConf.Password = split[1]
+ }
+ return authConf, nil
+ }
+
+ // We should return nil unless a token was set. That's especially
+ // useful for Podman's remote API.
+ if options.IdentityToken != "" {
+ return authConf, nil
+ }
+
+ return nil, nil
+}
+
+// newCopier creates a copier. Note that fields in options *may* overwrite the
+// counterparts of the specified system context. Please make sure to call
+// `(*copier).close()`.
+func (r *Runtime) newCopier(options *CopyOptions) (*copier, error) {
+ c := copier{}
+ c.systemContext = r.systemContextCopy()
+
+ if options.SourceLookupReferenceFunc != nil {
+ c.sourceLookup = options.SourceLookupReferenceFunc
+ }
+
+ if options.DestinationLookupReferenceFunc != nil {
+ c.destinationLookup = options.DestinationLookupReferenceFunc
+ }
+
+ if options.InsecureSkipTLSVerify != types.OptionalBoolUndefined {
+ c.systemContext.DockerInsecureSkipTLSVerify = options.InsecureSkipTLSVerify
+ c.systemContext.OCIInsecureSkipTLSVerify = options.InsecureSkipTLSVerify == types.OptionalBoolTrue
+ c.systemContext.DockerDaemonInsecureSkipTLSVerify = options.InsecureSkipTLSVerify == types.OptionalBoolTrue
+ }
+
+ c.systemContext.DirForceCompress = c.systemContext.DirForceCompress || options.DirForceCompress
+
+ if options.AuthFilePath != "" {
+ c.systemContext.AuthFilePath = options.AuthFilePath
+ }
+
+ c.systemContext.DockerArchiveAdditionalTags = options.dockerArchiveAdditionalTags
+
+ if options.Architecture != "" {
+ c.systemContext.ArchitectureChoice = options.Architecture
+ }
+ if options.OS != "" {
+ c.systemContext.OSChoice = options.OS
+ }
+ if options.Variant != "" {
+ c.systemContext.VariantChoice = options.Variant
+ }
+
+ if options.SignaturePolicyPath != "" {
+ c.systemContext.SignaturePolicyPath = options.SignaturePolicyPath
+ }
+
+ dockerAuthConfig, err := options.getDockerAuthConfig()
+ if err != nil {
+ return nil, err
+ }
+ if dockerAuthConfig != nil {
+ c.systemContext.DockerAuthConfig = dockerAuthConfig
+ }
+
+ if options.BlobInfoCacheDirPath != "" {
+ c.systemContext.BlobInfoCacheDir = options.BlobInfoCacheDirPath
+ }
+
+ if options.CertDirPath != "" {
+ c.systemContext.DockerCertPath = options.CertDirPath
+ }
+
+ policy, err := signature.DefaultPolicy(c.systemContext)
+ if err != nil {
+ return nil, err
+ }
+
+ // Buildah compatibility: even if the policy denies _all_ transports,
+ // Buildah still wants the storage to be accessible.
+ if options.PolicyAllowStorage {
+ policy.Transports[storageTransport.Transport.Name()] = storageAllowedPolicyScopes
+ }
+
+ policyContext, err := signature.NewPolicyContext(policy)
+ if err != nil {
+ return nil, err
+ }
+
+ c.policyContext = policyContext
+
+ c.retryOptions.MaxRetry = defaultMaxRetries
+ if options.MaxRetries != nil {
+ c.retryOptions.MaxRetry = int(*options.MaxRetries)
+ }
+ c.retryOptions.Delay = defaultRetryDelay
+ if options.RetryDelay != nil {
+ c.retryOptions.Delay = *options.RetryDelay
+ }
+
+ c.imageCopyOptions.Progress = options.Progress
+ if c.imageCopyOptions.Progress != nil {
+ c.imageCopyOptions.ProgressInterval = time.Second
+ }
+
+ c.imageCopyOptions.ForceManifestMIMEType = options.ManifestMIMEType
+ c.imageCopyOptions.SourceCtx = c.systemContext
+ c.imageCopyOptions.DestinationCtx = c.systemContext
+ c.imageCopyOptions.OciEncryptConfig = options.OciEncryptConfig
+ c.imageCopyOptions.OciEncryptLayers = options.OciEncryptLayers
+ c.imageCopyOptions.OciDecryptConfig = options.OciDecryptConfig
+ c.imageCopyOptions.RemoveSignatures = options.RemoveSignatures
+ c.imageCopyOptions.SignBy = options.SignBy
+ c.imageCopyOptions.ReportWriter = options.Writer
+
+ defaultContainerConfig, err := config.Default()
+ if err != nil {
+ logrus.Warnf("failed to get container config for copy options: %v", err)
+ } else {
+ c.imageCopyOptions.MaxParallelDownloads = defaultContainerConfig.Engine.ImageParallelCopies
+ }
+
+ return &c, nil
+}
+
+// close open resources.
+func (c *copier) close() error {
+ return c.policyContext.Destroy()
+}
+
+// copy the source to the destination. Returns the bytes of the copied
+// manifest which may be used for digest computation.
+func (c *copier) copy(ctx context.Context, source, destination types.ImageReference) ([]byte, error) {
+ logrus.Debugf("Copying source image %s to destination image %s", source.StringWithinTransport(), destination.StringWithinTransport())
+
+ var err error
+
+ if c.sourceLookup != nil {
+ source, err = c.sourceLookup(source)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ if c.destinationLookup != nil {
+ destination, err = c.destinationLookup(destination)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ // Buildah compat: used when running in OpenShift.
+ sourceInsecure, err := checkRegistrySourcesAllows(source)
+ if err != nil {
+ return nil, err
+ }
+ destinationInsecure, err := checkRegistrySourcesAllows(destination)
+ if err != nil {
+ return nil, err
+ }
+
+ // Sanity checks for Buildah.
+ if sourceInsecure != nil && *sourceInsecure {
+ if c.systemContext.DockerInsecureSkipTLSVerify == types.OptionalBoolFalse {
+ return nil, errors.Errorf("can't require tls verification on an insecured registry")
+ }
+ }
+ if destinationInsecure != nil && *destinationInsecure {
+ if c.systemContext.DockerInsecureSkipTLSVerify == types.OptionalBoolFalse {
+ return nil, errors.Errorf("can't require tls verification on an insecured registry")
+ }
+ }
+
+ var copiedManifest []byte
+ f := func() error {
+ opts := c.imageCopyOptions
+ if sourceInsecure != nil {
+ value := types.NewOptionalBool(*sourceInsecure)
+ opts.SourceCtx.DockerInsecureSkipTLSVerify = value
+ }
+ if destinationInsecure != nil {
+ value := types.NewOptionalBool(*destinationInsecure)
+ opts.DestinationCtx.DockerInsecureSkipTLSVerify = value
+ }
+
+ var err error
+ copiedManifest, err = copy.Image(ctx, c.policyContext, destination, source, &opts)
+ return err
+ }
+ return copiedManifest, retry.RetryIfNecessary(ctx, f, &c.retryOptions)
+}
+
+// checkRegistrySourcesAllows checks the $BUILD_REGISTRY_SOURCES environment
+// variable, if it's set. The contents are expected to be a JSON-encoded
+// github.com/openshift/api/config/v1.Image, set by an OpenShift build
+// controller that arranged for us to be run in a container.
+//
+// If set, the insecure return value indicates whether the registry is set to
+// be insecure.
+//
+// NOTE: this functionality is required by Buildah.
+func checkRegistrySourcesAllows(dest types.ImageReference) (insecure *bool, err error) {
+ registrySources, ok := os.LookupEnv("BUILD_REGISTRY_SOURCES")
+ if !ok || registrySources == "" {
+ return nil, nil
+ }
+
+ logrus.Debugf("BUILD_REGISTRY_SOURCES set %q", registrySources)
+
+ dref := dest.DockerReference()
+ if dref == nil || reference.Domain(dref) == "" {
+ return nil, nil
+ }
+
+ // Use local struct instead of github.com/openshift/api/config/v1 RegistrySources
+ var sources struct {
+ InsecureRegistries []string `json:"insecureRegistries,omitempty"`
+ BlockedRegistries []string `json:"blockedRegistries,omitempty"`
+ AllowedRegistries []string `json:"allowedRegistries,omitempty"`
+ }
+ if err := json.Unmarshal([]byte(registrySources), &sources); err != nil {
+ return nil, errors.Wrapf(err, "error parsing $BUILD_REGISTRY_SOURCES (%q) as JSON", registrySources)
+ }
+ blocked := false
+ if len(sources.BlockedRegistries) > 0 {
+ for _, blockedDomain := range sources.BlockedRegistries {
+ if blockedDomain == reference.Domain(dref) {
+ blocked = true
+ }
+ }
+ }
+ if blocked {
+ return nil, errors.Errorf("registry %q denied by policy: it is in the blocked registries list (%s)", reference.Domain(dref), registrySources)
+ }
+ allowed := true
+ if len(sources.AllowedRegistries) > 0 {
+ allowed = false
+ for _, allowedDomain := range sources.AllowedRegistries {
+ if allowedDomain == reference.Domain(dref) {
+ allowed = true
+ }
+ }
+ }
+ if !allowed {
+ return nil, errors.Errorf("registry %q denied by policy: not in allowed registries list (%s)", reference.Domain(dref), registrySources)
+ }
+
+ for _, insecureDomain := range sources.InsecureRegistries {
+ if insecureDomain == reference.Domain(dref) {
+ insecure := true
+ return &insecure, nil
+ }
+ }
+
+ return nil, nil
+}
diff --git a/vendor/github.com/containers/common/libimage/disk_usage.go b/vendor/github.com/containers/common/libimage/disk_usage.go
new file mode 100644
index 000000000..edfd095a0
--- /dev/null
+++ b/vendor/github.com/containers/common/libimage/disk_usage.go
@@ -0,0 +1,126 @@
+package libimage
+
+import (
+ "context"
+ "time"
+)
+
+// ImageDiskUsage reports the total size of an image, per repository tag.
+type ImageDiskUsage struct {
+ // Number of containers using the image.
+ Containers int
+ // ID of the image.
+ ID string
+ // Repository of the image.
+ Repository string
+ // Tag of the image.
+ Tag string
+ // Created time stamp.
+ Created time.Time
+ // The amount of space that an image shares with another one (i.e. their common data).
+ SharedSize int64
+ // The amount of space that is only used by a given image.
+ UniqueSize int64
+ // Sum of shared and unique size.
+ Size int64
+}
+
+// DiskUsage calculates the disk usage for each image in the local containers
+// storage. Note that a single image may yield multiple usage reports, one for
+// each repository tag.
+func (r *Runtime) DiskUsage(ctx context.Context) ([]ImageDiskUsage, error) {
+ layerTree, err := r.layerTree()
+ if err != nil {
+ return nil, err
+ }
+
+ images, err := r.ListImages(ctx, nil, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ var allUsages []ImageDiskUsage
+ for _, image := range images {
+ usages, err := diskUsageForImage(ctx, image, layerTree)
+ if err != nil {
+ return nil, err
+ }
+ allUsages = append(allUsages, usages...)
+ }
+ return allUsages, err
+}
+
+// diskUsageForImage returns the disk-usage statistics for the specified image.
+func diskUsageForImage(ctx context.Context, image *Image, tree *layerTree) ([]ImageDiskUsage, error) {
+ base := ImageDiskUsage{
+ ID: image.ID(),
+ Created: image.Created(),
+ Repository: "<none>",
+ Tag: "<none>",
+ }
+
+ // Shared, unique and total size.
+ parent, err := tree.parent(ctx, image)
+ if err != nil {
+ return nil, err
+ }
+ childIDs, err := tree.children(ctx, image, false)
+ if err != nil {
+ return nil, err
+ }
+
+ // Optimistically set unique size to the full size of the image.
+ size, err := image.Size()
+ if err != nil {
+ return nil, err
+ }
+ base.UniqueSize = size
+
+ if len(childIDs) > 0 {
+ // If we have children, we share everything.
+ base.SharedSize = base.UniqueSize
+ base.UniqueSize = 0
+ } else if parent != nil {
+ // If we have no children but a parent, remove the parent
+ // (shared) size from the unique one.
+ size, err := parent.Size()
+ if err != nil {
+ return nil, err
+ }
+ base.UniqueSize -= size
+ base.SharedSize = size
+ }
+
+ base.Size = base.SharedSize + base.UniqueSize
+
+ // Number of containers using the image.
+ containers, err := image.Containers()
+ if err != nil {
+ return nil, err
+ }
+ base.Containers = len(containers)
+
+ repoTags, err := image.NamedRepoTags()
+ if err != nil {
+ return nil, err
+ }
+
+ if len(repoTags) == 0 {
+ return []ImageDiskUsage{base}, nil
+ }
+
+ pairs, err := ToNameTagPairs(repoTags)
+ if err != nil {
+ return nil, err
+ }
+
+ results := make([]ImageDiskUsage, len(pairs))
+ for i, pair := range pairs {
+ res := base
+ res.Repository = pair.Name
+ res.Tag = pair.Tag
+ results[i] = res
+ }
+
+ return results, nil
+}
diff --git a/vendor/github.com/containers/common/libimage/download.go b/vendor/github.com/containers/common/libimage/download.go
new file mode 100644
index 000000000..5ea11f084
--- /dev/null
+++ b/vendor/github.com/containers/common/libimage/download.go
@@ -0,0 +1,46 @@
+package libimage
+
+import (
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "os"
+
+ "github.com/pkg/errors"
+)
+
+// tmpdir returns a path to a temporary directory.
+func (r *Runtime) tmpdir() string {
+ tmpdir := os.Getenv("TMPDIR")
+ if tmpdir == "" {
+ tmpdir = "/var/tmp"
+ }
+
+ return tmpdir
+}
+
+// downloadFromURL downloads an image in the format "https://example.com/myimage.tar"
+// and temporarily saves in it $TMPDIR/importxyz, which is deleted after the image is imported
+func (r *Runtime) downloadFromURL(source string) (string, error) {
+ fmt.Printf("Downloading from %q\n", source)
+
+ outFile, err := ioutil.TempFile(r.tmpdir(), "import")
+ if err != nil {
+ return "", errors.Wrap(err, "error creating file")
+ }
+ defer outFile.Close()
+
+ response, err := http.Get(source) // nolint:noctx
+ if err != nil {
+ return "", errors.Wrapf(err, "error downloading %q", source)
+ }
+ defer response.Body.Close()
+
+ _, err = io.Copy(outFile, response.Body)
+ if err != nil {
+ return "", errors.Wrapf(err, "error saving %s to %s", source, outFile.Name())
+ }
+
+ return outFile.Name(), nil
+}
diff --git a/vendor/github.com/containers/common/libimage/events.go b/vendor/github.com/containers/common/libimage/events.go
new file mode 100644
index 000000000..bca736c7b
--- /dev/null
+++ b/vendor/github.com/containers/common/libimage/events.go
@@ -0,0 +1,43 @@
+package libimage
+
+import "time"
+
+// EventType indicates the type of an event. Currently, there is only one
+// supported type for container image but we may add more (e.g., for manifest
+// lists) in the future.
+type EventType int
+
+const (
+ // EventTypeUnknown is an uninitialized EventType.
+ EventTypeUnknown EventType = iota
+ // EventTypeImagePull represents an image pull.
+ EventTypeImagePull
+ // EventTypeImagePush represents an image push.
+ EventTypeImagePush
+ // EventTypeImageRemove represents an image removal.
+ EventTypeImageRemove
+ // EventTypeImageLoad represents an image being loaded.
+ EventTypeImageLoad
+ // EventTypeImageSave represents an image being saved.
+ EventTypeImageSave
+ // EventTypeImageTag represents an image being tagged.
+ EventTypeImageTag
+ // EventTypeImageUntag represents an image being untagged.
+ EventTypeImageUntag
+ // EventTypeImageMount represents an image being mounted.
+ EventTypeImageMount
+ // EventTypeImageUnmount represents an image being unmounted.
+ EventTypeImageUnmount
+)
+
+// Event represents an event such an image pull or image tag.
+type Event struct {
+ // ID of the object (e.g., image ID).
+ ID string
+ // Name of the object (e.g., image name "quay.io/containers/podman:latest")
+ Name string
+ // Time of the event.
+ Time time.Time
+ // Type of the event.
+ Type EventType
+}
diff --git a/vendor/github.com/containers/common/libimage/filters.go b/vendor/github.com/containers/common/libimage/filters.go
new file mode 100644
index 000000000..eae18fd9c
--- /dev/null
+++ b/vendor/github.com/containers/common/libimage/filters.go
@@ -0,0 +1,228 @@
+package libimage
+
+import (
+ "context"
+ "fmt"
+ "path/filepath"
+ "strconv"
+ "strings"
+ "time"
+
+ filtersPkg "github.com/containers/common/pkg/filters"
+ "github.com/containers/common/pkg/timetype"
+ "github.com/pkg/errors"
+ "github.com/sirupsen/logrus"
+)
+
+// filterFunc is a prototype for a positive image filter. Returning `true`
+// indicates that the image matches the criteria.
+type filterFunc func(*Image) (bool, error)
+
+// filterImages returns a slice of images which are passing all specified
+// filters.
+func filterImages(images []*Image, filters []filterFunc) ([]*Image, error) {
+ if len(filters) == 0 {
+ return images, nil
+ }
+ result := []*Image{}
+ for i := range images {
+ include := true
+ var err error
+ for _, filter := range filters {
+ include, err = filter(images[i])
+ if err != nil {
+ return nil, err
+ }
+ if !include {
+ break
+ }
+ }
+ if include {
+ result = append(result, images[i])
+ }
+ }
+ return result, nil
+}
+
+// compileImageFilters creates `filterFunc`s for the specified filters. The
+// required format is `key=value` with the following supported keys:
+// after, since, before, containers, dangling, id, label, readonly, reference, intermediate, until
+func (r *Runtime) compileImageFilters(ctx context.Context, filters []string) ([]filterFunc, error) {
+ logrus.Tracef("Parsing image filters %s", filters)
+
+ filterFuncs := []filterFunc{}
+ for _, filter := range filters {
+ var key, value string
+ split := strings.SplitN(filter, "=", 2)
+ if len(split) != 2 {
+ return nil, errors.Errorf("invalid image filter %q: must be in the format %q", filter, "filter=value")
+ }
+
+ key = split[0]
+ value = split[1]
+ switch key {
+
+ case "after", "since":
+ img, _, err := r.LookupImage(value, nil)
+ if err != nil {
+ return nil, errors.Wrapf(err, "could not find local image for filter %q", filter)
+ }
+ filterFuncs = append(filterFuncs, filterAfter(img.Created()))
+
+ case "before":
+ img, _, err := r.LookupImage(value, nil)
+ if err != nil {
+ return nil, errors.Wrapf(err, "could not find local image for filter %q", filter)
+ }
+ filterFuncs = append(filterFuncs, filterBefore(img.Created()))
+
+ case "containers":
+ containers, err := strconv.ParseBool(value)
+ if err != nil {
+ return nil, errors.Wrapf(err, "non-boolean value %q for containers filter", value)
+ }
+ filterFuncs = append(filterFuncs, filterContainers(containers))
+
+ case "dangling":
+ dangling, err := strconv.ParseBool(value)
+ if err != nil {
+ return nil, errors.Wrapf(err, "non-boolean value %q for dangling filter", value)
+ }
+ filterFuncs = append(filterFuncs, filterDangling(dangling))
+
+ case "id":
+ filterFuncs = append(filterFuncs, filterID(value))
+
+ case "intermediate":
+ intermediate, err := strconv.ParseBool(value)
+ if err != nil {
+ return nil, errors.Wrapf(err, "non-boolean value %q for intermediate filter", value)
+ }
+ filterFuncs = append(filterFuncs, filterIntermediate(ctx, intermediate))
+
+ case "label":
+ filterFuncs = append(filterFuncs, filterLabel(ctx, value))
+
+ case "readonly":
+ readOnly, err := strconv.ParseBool(value)
+ if err != nil {
+ return nil, errors.Wrapf(err, "non-boolean value %q for readonly filter", value)
+ }
+ filterFuncs = append(filterFuncs, filterReadOnly(readOnly))
+
+ case "reference":
+ filterFuncs = append(filterFuncs, filterReference(value))
+
+ case "until":
+ ts, err := timetype.GetTimestamp(value, time.Now())
+ if err != nil {
+ return nil, err
+ }
+ seconds, nanoseconds, err := timetype.ParseTimestamps(ts, 0)
+ if err != nil {
+ return nil, err
+ }
+ until := time.Unix(seconds, nanoseconds)
+ filterFuncs = append(filterFuncs, filterBefore(until))
+
+ default:
+ return nil, errors.Errorf("unsupported image filter %q", key)
+ }
+ }
+
+ return filterFuncs, nil
+}
+
+// filterReference creates a reference filter for matching the specified value.
+func filterReference(value string) filterFunc {
+ // Replacing all '/' with '|' so that filepath.Match() can work. The '|'
+ // character is not valid in an image name, so this is safe.
+ //
+ // TODO: this has been copied from Podman and requires some more review
+ // and especially tests.
+ filter := fmt.Sprintf("*%s*", value)
+ filter = strings.ReplaceAll(filter, "/", "|")
+ return func(img *Image) (bool, error) {
+ if len(value) < 1 {
+ return true, nil
+ }
+ for _, name := range img.Names() {
+ newName := strings.ReplaceAll(name, "/", "|")
+ match, _ := filepath.Match(filter, newName)
+ if match {
+ return true, nil
+ }
+ }
+ return false, nil
+ }
+}
+
+// filterLabel creates a label for matching the specified value.
+func filterLabel(ctx context.Context, value string) filterFunc {
+ return func(img *Image) (bool, error) {
+ labels, err := img.Labels(ctx)
+ if err != nil {
+ return false, err
+ }
+ return filtersPkg.MatchLabelFilters([]string{value}, labels), nil
+ }
+}
+
+// filterAfter creates an after filter for matching the specified value.
+func filterAfter(value time.Time) filterFunc {
+ return func(img *Image) (bool, error) {
+ return img.Created().After(value), nil
+ }
+}
+
+// filterBefore creates a before filter for matching the specified value.
+func filterBefore(value time.Time) filterFunc {
+ return func(img *Image) (bool, error) {
+ return img.Created().Before(value), nil
+ }
+}
+
+// filterReadOnly creates a readonly filter for matching the specified value.
+func filterReadOnly(value bool) filterFunc {
+ return func(img *Image) (bool, error) {
+ return img.IsReadOnly() == value, nil
+ }
+}
+
+// filterContainers creates a container filter for matching the specified value.
+func filterContainers(value bool) filterFunc {
+ return func(img *Image) (bool, error) {
+ ctrs, err := img.Containers()
+ if err != nil {
+ return false, err
+ }
+ return (len(ctrs) > 0) == value, nil
+ }
+}
+
+// filterDangling creates a dangling filter for matching the specified value.
+func filterDangling(value bool) filterFunc {
+ return func(img *Image) (bool, error) {
+ return img.IsDangling() == value, nil
+ }
+}
+
+// filterID creates an image-ID filter for matching the specified value.
+func filterID(value string) filterFunc {
+ return func(img *Image) (bool, error) {
+ return img.ID() == value, nil
+ }
+}
+
+// filterIntermediate creates an intermediate filter for images. An image is
+// considered to be an intermediate image if it is dangling (i.e., no tags) and
+// has no children (i.e., no other image depends on it).
+func filterIntermediate(ctx context.Context, value bool) filterFunc {
+ return func(img *Image) (bool, error) {
+ isIntermediate, err := img.IsIntermediate(ctx)
+ if err != nil {
+ return false, err
+ }
+ return isIntermediate == value, nil
+ }
+}
diff --git a/vendor/github.com/containers/common/libimage/history.go b/vendor/github.com/containers/common/libimage/history.go
new file mode 100644
index 000000000..b63fe696b
--- /dev/null
+++ b/vendor/github.com/containers/common/libimage/history.go
@@ -0,0 +1,80 @@
+package libimage
+
+import (
+ "context"
+ "time"
+
+ "github.com/containers/storage"
+)
+
+// ImageHistory contains the history information of an image.
+type ImageHistory struct {
+ ID string `json:"id"`
+ Created *time.Time `json:"created"`
+ CreatedBy string `json:"createdBy"`
+ Size int64 `json:"size"`
+ Comment string `json:"comment"`
+ Tags []string `json:"tags"`
+}
+
+// History computes the image history of the image including all of its parents.
+func (i *Image) History(ctx context.Context) ([]ImageHistory, error) {
+ ociImage, err := i.toOCI(ctx)
+ if err != nil {
+ return nil, err
+ }
+
+ layerTree, err := i.runtime.layerTree()
+ if err != nil {
+ return nil, err
+ }
+
+ var allHistory []ImageHistory
+ var layer *storage.Layer
+ if i.TopLayer() != "" {
+ layer, err = i.runtime.store.Layer(i.TopLayer())
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ // Iterate in reverse order over the history entries, and lookup the
+ // corresponding image ID, size and get the next layer if needed.
+ numHistories := len(ociImage.History) - 1
+ usedIDs := make(map[string]bool) // prevents assigning images IDs more than once
+ for x := numHistories; x >= 0; x-- {
+ history := ImageHistory{
+ ID: "<missing>", // may be overridden below
+ Created: ociImage.History[x].Created,
+ CreatedBy: ociImage.History[x].CreatedBy,
+ Comment: ociImage.History[x].Comment,
+ }
+
+ if layer != nil {
+ history.Tags = layer.Names
+ if !ociImage.History[x].EmptyLayer {
+ history.Size = layer.UncompressedSize
+ }
+ // Query the layer tree if it's the top layer of an
+ // image.
+ node := layerTree.node(layer.ID)
+ if len(node.images) > 0 {
+ id := node.images[0].ID() // always use the first one
+ if _, used := usedIDs[id]; !used {
+ history.ID = id
+ usedIDs[id] = true
+ }
+ }
+ if layer.Parent != "" && !ociImage.History[x].EmptyLayer {
+ layer, err = i.runtime.store.Layer(layer.Parent)
+ if err != nil {
+ return nil, err
+ }
+ }
+ }
+
+ allHistory = append(allHistory, history)
+ }
+
+ return allHistory, nil
+}
diff --git a/vendor/github.com/containers/common/libimage/image.go b/vendor/github.com/containers/common/libimage/image.go
new file mode 100644
index 000000000..4728565bb
--- /dev/null
+++ b/vendor/github.com/containers/common/libimage/image.go
@@ -0,0 +1,802 @@
+package libimage
+
+import (
+ "context"
+ "path/filepath"
+ "sort"
+ "strings"
+ "time"
+
+ "github.com/containers/image/v5/docker/reference"
+ "github.com/containers/image/v5/manifest"
+ storageTransport "github.com/containers/image/v5/storage"
+ "github.com/containers/image/v5/types"
+ "github.com/containers/storage"
+ "github.com/hashicorp/go-multierror"
+ "github.com/opencontainers/go-digest"
+ ociv1 "github.com/opencontainers/image-spec/specs-go/v1"
+ "github.com/pkg/errors"
+ "github.com/sirupsen/logrus"
+)
+
+// Image represents an image in the containers storage and allows for further
+// operations and data manipulation.
+type Image struct {
+ // Backwards pointer to the runtime.
+ runtime *Runtime
+
+ // Counterpart in the local containers storage.
+ storageImage *storage.Image
+
+ // Image reference to the containers storage.
+ storageReference types.ImageReference
+
+ // All fields in the below structure are cached. They may be cleared
+ // at any time. When adding a new field, please make sure to clear
+ // it in `(*Image).reload()`.
+ cached struct {
+ // Image source. Cached for performance reasons.
+ imageSource types.ImageSource
+ // Inspect data we get from containers/image.
+ partialInspectData *types.ImageInspectInfo
+ // Fully assembled image data.
+ completeInspectData *ImageData
+ // Corresponding OCI image.
+ ociv1Image *ociv1.Image
+ }
+}
+
+// reload the image and pessimistically clear all cached data.
+func (i *Image) reload() error {
+ logrus.Tracef("Reloading image %s", i.ID())
+ img, err := i.runtime.store.Image(i.ID())
+ if err != nil {
+ return errors.Wrap(err, "error reloading image")
+ }
+ i.storageImage = img
+ i.cached.imageSource = nil
+ i.cached.partialInspectData = nil
+ i.cached.completeInspectData = nil
+ i.cached.ociv1Image = nil
+ return nil
+}
+
+// Names returns associated names with the image which may be a mix of tags and
+// digests.
+func (i *Image) Names() []string {
+ return i.storageImage.Names
+}
+
+// StorageImage returns the underlying storage.Image.
+func (i *Image) StorageImage() *storage.Image {
+ return i.storageImage
+}
+
+// NamesHistory returns a string array of names previously associated with the
+// image, which may be a mixture of tags and digests.
+func (i *Image) NamesHistory() []string {
+ return i.storageImage.NamesHistory
+}
+
+// ID returns the ID of the image.
+func (i *Image) ID() string {
+ return i.storageImage.ID
+}
+
+// Digest is a digest value that we can use to locate the image, if one was
+// specified at creation-time.
+func (i *Image) Digest() digest.Digest {
+ return i.storageImage.Digest
+}
+
+// Digests is a list of digest values of the image's manifests, and possibly a
+// manually-specified value, that we can use to locate the image. If Digest is
+// set, its value is also in this list.
+func (i *Image) Digests() []digest.Digest {
+ return i.storageImage.Digests
+}
+
+// IsReadOnly returns whether the image is set read only.
+func (i *Image) IsReadOnly() bool {
+ return i.storageImage.ReadOnly
+}
+
+// IsDangling returns true if the image is dangling. An image is considered
+// dangling if no names are associated with it in the containers storage.
+func (i *Image) IsDangling() bool {
+ return len(i.Names()) == 0
+}
+
+// IsIntermediate returns true if the image is an intermediate image, that is
+// a dangling image without children.
+func (i *Image) IsIntermediate(ctx context.Context) (bool, error) {
+ // If the image has tags, it's not an intermediate one.
+ if !i.IsDangling() {
+ return false, nil
+ }
+ children, err := i.getChildren(ctx, false)
+ if err != nil {
+ return false, err
+ }
+ // No tags and no children -> intermediate image.
+ return len(children) == 0, nil
+}
+
+// Created returns the time the image was created.
+func (i *Image) Created() time.Time {
+ return i.storageImage.Created
+}
+
+// Labels returns the label of the image.
+func (i *Image) Labels(ctx context.Context) (map[string]string, error) {
+ data, err := i.inspectInfo(ctx)
+ if err != nil {
+ isManifestList, listErr := i.IsManifestList(ctx)
+ if listErr != nil {
+ err = errors.Wrapf(err, "fallback error checking whether image is a manifest list: %v", err)
+ } else if isManifestList {
+ logrus.Debugf("Ignoring error: cannot return labels for manifest list or image index %s", i.ID())
+ return nil, nil
+ }
+ return nil, err
+ }
+
+ return data.Labels, nil
+}
+
+// TopLayer returns the top layer id as a string
+func (i *Image) TopLayer() string {
+ return i.storageImage.TopLayer
+}
+
+// Parent returns the parent image or nil if there is none
+func (i *Image) Parent(ctx context.Context) (*Image, error) {
+ tree, err := i.runtime.layerTree()
+ if err != nil {
+ return nil, err
+ }
+ return tree.parent(ctx, i)
+}
+
+// HasChildren indicates whether the image has children.
+func (i *Image) HasChildren(ctx context.Context) (bool, error) {
+ children, err := i.getChildren(ctx, false)
+ if err != nil {
+ return false, err
+ }
+ return len(children) > 0, nil
+}
+
+// Children returns the image's children.
+func (i *Image) Children(ctx context.Context) ([]*Image, error) {
+ children, err := i.getChildren(ctx, true)
+ if err != nil {
+ return nil, err
+ }
+ return children, nil
+}
+
+// getChildren returns a list of imageIDs that depend on the image. If all is
+// false, only the first child image is returned.
+func (i *Image) getChildren(ctx context.Context, all bool) ([]*Image, error) {
+ tree, err := i.runtime.layerTree()
+ if err != nil {
+ return nil, err
+ }
+
+ return tree.children(ctx, i, all)
+}
+
+// Containers returns a list of containers using the image.
+func (i *Image) Containers() ([]string, error) {
+ var containerIDs []string
+ containers, err := i.runtime.store.Containers()
+ if err != nil {
+ return nil, err
+ }
+ imageID := i.ID()
+ for i := range containers {
+ if containers[i].ImageID == imageID {
+ containerIDs = append(containerIDs, containers[i].ID)
+ }
+ }
+ return containerIDs, nil
+}
+
+// removeContainers removes all containers using the image.
+func (i *Image) removeContainers(fn RemoveContainerFunc) error {
+ // Execute the custom removal func if specified.
+ if fn != nil {
+ logrus.Debugf("Removing containers of image %s with custom removal function", i.ID())
+ if err := fn(i.ID()); err != nil {
+ return err
+ }
+ }
+
+ containers, err := i.Containers()
+ if err != nil {
+ return err
+ }
+
+ logrus.Debugf("Removing containers of image %s from the local containers storage", i.ID())
+ var multiE error
+ for _, cID := range containers {
+ if err := i.runtime.store.DeleteContainer(cID); err != nil {
+ // If the container does not exist anymore, we're good.
+ if errors.Cause(err) != storage.ErrContainerUnknown {
+ multiE = multierror.Append(multiE, err)
+ }
+ }
+ }
+
+ return multiE
+}
+
+// RemoveContainerFunc allows for customizing the removal of containers using
+// an image specified by imageID.
+type RemoveContainerFunc func(imageID string) error
+
+// RemoveImagesReport is the assembled data from removing *one* image.
+type RemoveImageReport struct {
+ // ID of the image.
+ ID string
+ // Image was removed.
+ Removed bool
+ // Size of the removed image. Only set when explicitly requested in
+ // RemoveImagesOptions.
+ Size int64
+ // The untagged tags.
+ Untagged []string
+}
+
+// remove removes the image along with all dangling parent images that no other
+// image depends on. The image must not be set read-only and not be used by
+// containers.
+//
+// If the image is used by containers return storage.ErrImageUsedByContainer.
+// Use force to remove these containers.
+//
+// NOTE: the rmMap is used to assemble image-removal data across multiple
+// invocations of this function. The recursive nature requires some
+// bookkeeping to make sure that all data is aggregated correctly.
+//
+// This function is internal. Users of libimage should always use
+// `(*Runtime).RemoveImages()`.
+func (i *Image) remove(ctx context.Context, rmMap map[string]*RemoveImageReport, referencedBy string, options *RemoveImagesOptions) error {
+ // If referencedBy is empty, the image is considered to be removed via
+ // `image remove --all` which alters the logic below.
+
+ // The removal logic below is complex. There is a number of rules
+ // inherited from Podman and Buildah (and Docker). This function
+ // should be the *only* place to extend the removal logic so we keep it
+ // sealed in one place. Make sure to add verbose comments to leave
+ // some breadcrumbs for future readers.
+ logrus.Debugf("Removing image %s", i.ID())
+
+ if i.IsReadOnly() {
+ return errors.Errorf("cannot remove read-only image %q", i.ID())
+ }
+
+ // Check if we have already visited this image.
+ report, exists := rmMap[i.ID()]
+ if exists {
+ // If the image has already been removed, we're done.
+ if report.Removed {
+ return nil
+ }
+ } else {
+ report = &RemoveImageReport{ID: i.ID()}
+ rmMap[i.ID()] = report
+ }
+
+ // The image may have already been (partially) removed, so we need to
+ // have a closer look at the errors. On top, image removal should be
+ // tolerant toward corrupted images.
+ handleError := func(err error) error {
+ switch errors.Cause(err) {
+ case storage.ErrImageUnknown, storage.ErrNotAnImage, storage.ErrLayerUnknown:
+ // The image or layers of the image may already
+ // have been removed in which case we consider
+ // the image to be removed.
+ return nil
+ default:
+ return err
+ }
+ }
+
+ // Calculate the size if requested. `podman-image-prune` likes to
+ // report the regained size.
+ if options.WithSize {
+ size, err := i.Size()
+ if handleError(err) != nil {
+ return err
+ }
+ report.Size = size
+ }
+
+ skipRemove := false
+ numNames := len(i.Names())
+
+ // NOTE: the `numNames == 1` check is not only a performance
+ // optimization but also preserves existing Podman/Docker behaviour.
+ // If image "foo" is used by a container and has only this tag/name,
+ // an `rmi foo` will not untag "foo" but instead attempt to remove the
+ // entire image. If there's a container using "foo", we should get an
+ // error.
+ if options.Force || referencedBy == "" || numNames == 1 {
+ // DO NOTHING, the image will be removed
+ } else {
+ byID := strings.HasPrefix(i.ID(), referencedBy)
+ byDigest := strings.HasPrefix(referencedBy, "sha256:")
+ if byID && numNames > 1 {
+ return errors.Errorf("unable to delete image %q by ID with more than one tag (%s): please force removal", i.ID(), i.Names())
+ } else if byDigest && numNames > 1 {
+ // FIXME - Docker will remove the digest but containers storage
+ // does not support that yet, so our hands are tied.
+ return errors.Errorf("unable to delete image %q by digest with more than one tag (%s): please force removal", i.ID(), i.Names())
+ }
+
+ // Only try to untag if we know it's not an ID or digest.
+ if !byID && !byDigest {
+ if err := i.Untag(referencedBy); handleError(err) != nil {
+ return err
+ }
+ report.Untagged = append(report.Untagged, referencedBy)
+
+ // If there's still tags left, we cannot delete it.
+ skipRemove = len(i.Names()) > 0
+ }
+ }
+
+ if skipRemove {
+ return nil
+ }
+
+ // Perform the actual removal. First, remove containers if needed.
+ if options.Force {
+ if err := i.removeContainers(options.RemoveContainerFunc); err != nil {
+ return err
+ }
+ }
+
+ // Podman/Docker compat: we only report an image as removed if it has
+ // no children. Otherwise, the data is effectively still present in the
+ // storage despite the image being removed.
+ hasChildren, err := i.HasChildren(ctx)
+ if err != nil {
+ // We must be tolerant toward corrupted images.
+ // See containers/podman commit fd9dd7065d44.
+ logrus.Warnf("error determining if an image is a parent: %v, ignoring the error", err)
+ hasChildren = false
+ }
+
+ // If there's a dangling parent that no other image depends on, remove
+ // it recursively.
+ parent, err := i.Parent(ctx)
+ if err != nil {
+ // We must be tolerant toward corrupted images.
+ // See containers/podman commit fd9dd7065d44.
+ logrus.Warnf("error determining parent of image: %v, ignoring the error", err)
+ parent = nil
+ }
+
+ if _, err := i.runtime.store.DeleteImage(i.ID(), true); handleError(err) != nil {
+ return err
+ }
+ report.Untagged = append(report.Untagged, i.Names()...)
+
+ if !hasChildren {
+ report.Removed = true
+ }
+
+ // Check if can remove the parent image.
+ if parent == nil {
+ return nil
+ }
+
+ if !parent.IsDangling() {
+ return nil
+ }
+
+ // If the image has siblings, we don't remove the parent.
+ hasSiblings, err := parent.HasChildren(ctx)
+ if err != nil {
+ // See Podman commit fd9dd7065d44: we need to
+ // be tolerant toward corrupted images.
+ logrus.Warnf("error determining if an image is a parent: %v, ignoring the error", err)
+ hasSiblings = false
+ }
+ if hasSiblings {
+ return nil
+ }
+
+ // Recurse into removing the parent.
+ return parent.remove(ctx, rmMap, "", options)
+}
+
+// Tag the image with the specified name and store it in the local containers
+// storage. The name is normalized according to the rules of NormalizeName.
+func (i *Image) Tag(name string) error {
+ ref, err := NormalizeName(name)
+ if err != nil {
+ return errors.Wrapf(err, "error normalizing name %q", name)
+ }
+
+ logrus.Debugf("Tagging image %s with %q", i.ID(), ref.String())
+
+ newNames := append(i.Names(), ref.String())
+ if err := i.runtime.store.SetNames(i.ID(), newNames); err != nil {
+ return err
+ }
+
+ return i.reload()
+}
+
+// to have some symmetry with the errors from containers/storage.
+var errTagUnknown = errors.New("tag not known")
+
+// TODO (@vrothberg) - `docker rmi sha256:` will remove the digest from the
+// image. However, that's something containers storage does not support.
+var errUntagDigest = errors.New("untag by digest not supported")
+
+// Untag the image with the specified name and make the change persistent in
+// the local containers storage. The name is normalized according to the rules
+// of NormalizeName.
+func (i *Image) Untag(name string) error {
+ if strings.HasPrefix(name, "sha256:") {
+ return errors.Wrap(errUntagDigest, name)
+ }
+
+ ref, err := NormalizeName(name)
+ if err != nil {
+ return errors.Wrapf(err, "error normalizing name %q", name)
+ }
+ name = ref.String()
+
+ logrus.Debugf("Untagging %q from image %s", ref.String(), i.ID())
+
+ removedName := false
+ newNames := []string{}
+ for _, n := range i.Names() {
+ if n == name {
+ removedName = true
+ continue
+ }
+ newNames = append(newNames, n)
+ }
+
+ if !removedName {
+ return errors.Wrap(errTagUnknown, name)
+ }
+
+ if err := i.runtime.store.SetNames(i.ID(), newNames); err != nil {
+ return err
+ }
+
+ return i.reload()
+}
+
+// RepoTags returns a string slice of repotags associated with the image.
+func (i *Image) RepoTags() ([]string, error) {
+ namedTagged, err := i.NamedTaggedRepoTags()
+ if err != nil {
+ return nil, err
+ }
+ repoTags := make([]string, len(namedTagged))
+ for i := range namedTagged {
+ repoTags[i] = namedTagged[i].String()
+ }
+ return repoTags, nil
+}
+
+// NamedTaggedRepoTags returns the repotags associated with the image as a
+// slice of reference.NamedTagged.
+func (i *Image) NamedTaggedRepoTags() ([]reference.NamedTagged, error) {
+ var repoTags []reference.NamedTagged
+ for _, name := range i.Names() {
+ parsed, err := reference.Parse(name)
+ if err != nil {
+ return nil, err
+ }
+ named, isNamed := parsed.(reference.Named)
+ if !isNamed {
+ continue
+ }
+ tagged, isTagged := named.(reference.NamedTagged)
+ if !isTagged {
+ continue
+ }
+ repoTags = append(repoTags, tagged)
+ }
+ return repoTags, nil
+}
+
+// NamedRepoTags returns the repotags associated with the image as a
+// slice of reference.Named.
+func (i *Image) NamedRepoTags() ([]reference.Named, error) {
+ var repoTags []reference.Named
+ for _, name := range i.Names() {
+ parsed, err := reference.Parse(name)
+ if err != nil {
+ return nil, err
+ }
+ if named, isNamed := parsed.(reference.Named); isNamed {
+ repoTags = append(repoTags, named)
+ }
+ }
+ return repoTags, nil
+}
+
+// inRepoTags looks for the specified name/tag pair in the image's repo tags.
+// Note that tag may be empty.
+func (i *Image) inRepoTags(name, tag string) (reference.Named, error) {
+ repoTags, err := i.NamedRepoTags()
+ if err != nil {
+ return nil, err
+ }
+
+ pairs, err := ToNameTagPairs(repoTags)
+ if err != nil {
+ return nil, err
+ }
+
+ for _, pair := range pairs {
+ if tag != "" && tag != pair.Tag {
+ continue
+ }
+ if !strings.HasSuffix(pair.Name, name) {
+ continue
+ }
+ if len(pair.Name) == len(name) { // full match
+ return pair.named, nil
+ }
+ if pair.Name[len(pair.Name)-len(name)-1] == '/' { // matches at repo
+ return pair.named, nil
+ }
+ }
+
+ return nil, nil
+}
+
+// RepoDigests returns a string array of repodigests associated with the image.
+func (i *Image) RepoDigests() ([]string, error) {
+ repoDigests := []string{}
+ added := make(map[string]struct{})
+
+ for _, name := range i.Names() {
+ for _, imageDigest := range append(i.Digests(), i.Digest()) {
+ if imageDigest == "" {
+ continue
+ }
+
+ named, err := reference.ParseNormalizedNamed(name)
+ if err != nil {
+ return nil, err
+ }
+
+ canonical, err := reference.WithDigest(reference.TrimNamed(named), imageDigest)
+ if err != nil {
+ return nil, err
+ }
+
+ if _, alreadyInList := added[canonical.String()]; !alreadyInList {
+ repoDigests = append(repoDigests, canonical.String())
+ added[canonical.String()] = struct{}{}
+ }
+ }
+ }
+ sort.Strings(repoDigests)
+ return repoDigests, nil
+}
+
+// Mount the image with the specified mount options and label, both of which
+// are directly passed down to the containers storage. Returns the fully
+// evaluated path to the mount point.
+func (i *Image) Mount(ctx context.Context, mountOptions []string, mountLabel string) (string, error) {
+ mountPoint, err := i.runtime.store.MountImage(i.ID(), mountOptions, mountLabel)
+ if err != nil {
+ return "", err
+ }
+ mountPoint, err = filepath.EvalSymlinks(mountPoint)
+ if err != nil {
+ return "", err
+ }
+ logrus.Debugf("Mounted image %s at %q", i.ID(), mountPoint)
+ return mountPoint, nil
+}
+
+// Mountpoint returns the path to image's mount point. The path is empty if
+// the image is not mounted.
+func (i *Image) Mountpoint() (string, error) {
+ mountedTimes, err := i.runtime.store.Mounted(i.TopLayer())
+ if err != nil || mountedTimes == 0 {
+ if errors.Cause(err) == storage.ErrLayerUnknown {
+ // Can happen, Podman did it, but there's no
+ // explanation why.
+ err = nil
+ }
+ return "", err
+ }
+
+ layer, err := i.runtime.store.Layer(i.TopLayer())
+ if err != nil {
+ return "", err
+ }
+
+ mountPoint, err := filepath.EvalSymlinks(layer.MountPoint)
+ if err != nil {
+ return "", err
+ }
+
+ return mountPoint, nil
+}
+
+// Unmount the image. Use force to ignore the reference counter and forcefully
+// unmount.
+func (i *Image) Unmount(force bool) error {
+ logrus.Debugf("Unmounted image %s", i.ID())
+ _, err := i.runtime.store.UnmountImage(i.ID(), force)
+ return err
+}
+
+// MountPoint returns the fully-evaluated mount point of the image. If the
+// image isn't mounted, an empty string is returned.
+func (i *Image) MountPoint() (string, error) {
+ counter, err := i.runtime.store.Mounted(i.TopLayer())
+ if err != nil {
+ return "", err
+ }
+
+ if counter == 0 {
+ return "", nil
+ }
+
+ layer, err := i.runtime.store.Layer(i.TopLayer())
+ if err != nil {
+ return "", err
+ }
+ return filepath.EvalSymlinks(layer.MountPoint)
+}
+
+// Size computes the size of the image layers and associated data.
+func (i *Image) Size() (int64, error) {
+ return i.runtime.store.ImageSize(i.ID())
+}
+
+// HasDifferentDigest returns true if the image specified by `remoteRef` has a
+// different digest than the local one. This check can be useful to check for
+// updates on remote registries.
+func (i *Image) HasDifferentDigest(ctx context.Context, remoteRef types.ImageReference) (bool, error) {
+ // We need to account for the arch that the image uses. It seems
+ // common on ARM to tweak this option to pull the correct image. See
+ // github.com/containers/podman/issues/6613.
+ inspectInfo, err := i.inspectInfo(ctx)
+ if err != nil {
+ return false, err
+ }
+
+ sys := i.runtime.systemContextCopy()
+ sys.ArchitectureChoice = inspectInfo.Architecture
+ // OS and variant may not be set, so let's check to avoid accidental
+ // overrides of the runtime settings.
+ if inspectInfo.Os != "" {
+ sys.OSChoice = inspectInfo.Os
+ }
+ if inspectInfo.Variant != "" {
+ sys.VariantChoice = inspectInfo.Variant
+ }
+
+ remoteImg, err := remoteRef.NewImage(ctx, sys)
+ if err != nil {
+ return false, err
+ }
+
+ rawManifest, _, err := remoteImg.Manifest(ctx)
+ if err != nil {
+ return false, err
+ }
+
+ remoteDigest, err := manifest.Digest(rawManifest)
+ if err != nil {
+ return false, err
+ }
+
+ return i.Digest().String() != remoteDigest.String(), nil
+}
+
+// driverData gets the driver data from the store on a layer
+func (i *Image) driverData() (*DriverData, error) {
+ store := i.runtime.store
+ layerID := i.TopLayer()
+ driver, err := store.GraphDriver()
+ if err != nil {
+ return nil, err
+ }
+ metaData, err := driver.Metadata(layerID)
+ if err != nil {
+ return nil, err
+ }
+ if mountTimes, err := store.Mounted(layerID); mountTimes == 0 || err != nil {
+ delete(metaData, "MergedDir")
+ }
+ return &DriverData{
+ Name: driver.String(),
+ Data: metaData,
+ }, nil
+}
+
+// StorageReference returns the image's reference to the containers storage
+// using the image ID.
+func (i *Image) StorageReference() (types.ImageReference, error) {
+ if i.storageReference != nil {
+ return i.storageReference, nil
+ }
+ ref, err := storageTransport.Transport.ParseStoreReference(i.runtime.store, "@"+i.ID())
+ if err != nil {
+ return nil, err
+ }
+ i.storageReference = ref
+ return ref, nil
+}
+
+// source returns the possibly cached image reference.
+func (i *Image) source(ctx context.Context) (types.ImageSource, error) {
+ if i.cached.imageSource != nil {
+ return i.cached.imageSource, nil
+ }
+ ref, err := i.StorageReference()
+ if err != nil {
+ return nil, err
+ }
+ src, err := ref.NewImageSource(ctx, i.runtime.systemContextCopy())
+ if err != nil {
+ return nil, err
+ }
+ i.cached.imageSource = src
+ return src, nil
+}
+
+// rawConfigBlob returns the image's config as a raw byte slice. Users need to
+// unmarshal it to the corresponding type (OCI, Docker v2s{1,2})
+func (i *Image) rawConfigBlob(ctx context.Context) ([]byte, error) {
+ ref, err := i.StorageReference()
+ if err != nil {
+ return nil, err
+ }
+
+ imageCloser, err := ref.NewImage(ctx, i.runtime.systemContextCopy())
+ if err != nil {
+ return nil, err
+ }
+ defer imageCloser.Close()
+
+ return imageCloser.ConfigBlob(ctx)
+}
+
+// Manifest returns the raw data and the MIME type of the image's manifest.
+func (i *Image) Manifest(ctx context.Context) (rawManifest []byte, mimeType string, err error) {
+ src, err := i.source(ctx)
+ if err != nil {
+ return nil, "", err
+ }
+ return src.GetManifest(ctx, nil)
+}
+
+// getImageDigest creates an image object and uses the hex value of the digest as the image ID
+// for parsing the store reference
+func getImageDigest(ctx context.Context, src types.ImageReference, sys *types.SystemContext) (string, error) {
+ newImg, err := src.NewImage(ctx, sys)
+ if err != nil {
+ return "", err
+ }
+ defer func() {
+ if err := newImg.Close(); err != nil {
+ logrus.Errorf("failed to close image: %q", err)
+ }
+ }()
+ imageDigest := newImg.ConfigInfo().Digest
+ if err = imageDigest.Validate(); err != nil {
+ return "", errors.Wrapf(err, "error getting config info")
+ }
+ return "@" + imageDigest.Hex(), nil
+}
diff --git a/vendor/github.com/containers/common/libimage/image_config.go b/vendor/github.com/containers/common/libimage/image_config.go
new file mode 100644
index 000000000..b57121182
--- /dev/null
+++ b/vendor/github.com/containers/common/libimage/image_config.go
@@ -0,0 +1,242 @@
+package libimage
+
+import (
+ "encoding/json"
+ "fmt"
+ "path/filepath"
+ "strconv"
+ "strings"
+
+ "github.com/containers/common/pkg/signal"
+ ociv1 "github.com/opencontainers/image-spec/specs-go/v1"
+ "github.com/pkg/errors"
+)
+
+// ImageConfig is a wrapper around the OCIv1 Image Configuration struct exported
+// by containers/image, but containing additional fields that are not supported
+// by OCIv1 (but are by Docker v2) - notably OnBuild.
+type ImageConfig struct {
+ ociv1.ImageConfig
+ OnBuild []string
+}
+
+// ImageConfigFromChanges produces a v1.ImageConfig from the --change flag that
+// is accepted by several Podman commands. It accepts a (limited subset) of
+// Dockerfile instructions.
+// Valid changes are:
+// * USER
+// * EXPOSE
+// * ENV
+// * ENTRYPOINT
+// * CMD
+// * VOLUME
+// * WORKDIR
+// * LABEL
+// * STOPSIGNAL
+// * ONBUILD
+func ImageConfigFromChanges(changes []string) (*ImageConfig, error) { // nolint:gocyclo
+ config := &ImageConfig{}
+
+ for _, change := range changes {
+ // First, let's assume proper Dockerfile format - space
+ // separator between instruction and value
+ split := strings.SplitN(change, " ", 2)
+
+ if len(split) != 2 {
+ split = strings.SplitN(change, "=", 2)
+ if len(split) != 2 {
+ return nil, errors.Errorf("invalid change %q - must be formatted as KEY VALUE", change)
+ }
+ }
+
+ outerKey := strings.ToUpper(strings.TrimSpace(split[0]))
+ value := strings.TrimSpace(split[1])
+ switch outerKey {
+ case "USER":
+ // Assume literal contents are the user.
+ if value == "" {
+ return nil, errors.Errorf("invalid change %q - must provide a value to USER", change)
+ }
+ config.User = value
+ case "EXPOSE":
+ // EXPOSE is either [portnum] or
+ // [portnum]/[proto]
+ // Protocol must be "tcp" or "udp"
+ splitPort := strings.Split(value, "/")
+ if len(splitPort) > 2 {
+ return nil, errors.Errorf("invalid change %q - EXPOSE port must be formatted as PORT[/PROTO]", change)
+ }
+ portNum, err := strconv.Atoi(splitPort[0])
+ if err != nil {
+ return nil, errors.Wrapf(err, "invalid change %q - EXPOSE port must be an integer", change)
+ }
+ if portNum > 65535 || portNum <= 0 {
+ return nil, errors.Errorf("invalid change %q - EXPOSE port must be a valid port number", change)
+ }
+ proto := "tcp"
+ if len(splitPort) > 1 {
+ testProto := strings.ToLower(splitPort[1])
+ switch testProto {
+ case "tcp", "udp":
+ proto = testProto
+ default:
+ return nil, errors.Errorf("invalid change %q - EXPOSE protocol must be TCP or UDP", change)
+ }
+ }
+ if config.ExposedPorts == nil {
+ config.ExposedPorts = make(map[string]struct{})
+ }
+ config.ExposedPorts[fmt.Sprintf("%d/%s", portNum, proto)] = struct{}{}
+ case "ENV":
+ // Format is either:
+ // ENV key=value
+ // ENV key=value key=value ...
+ // ENV key value
+ // Both keys and values can be surrounded by quotes to group them.
+ // For now: we only support key=value
+ // We will attempt to strip quotation marks if present.
+
+ var (
+ key, val string
+ )
+
+ splitEnv := strings.SplitN(value, "=", 2)
+ key = splitEnv[0]
+ // We do need a key
+ if key == "" {
+ return nil, errors.Errorf("invalid change %q - ENV must have at least one argument", change)
+ }
+ // Perfectly valid to not have a value
+ if len(splitEnv) == 2 {
+ val = splitEnv[1]
+ }
+
+ if strings.HasPrefix(key, `"`) && strings.HasSuffix(key, `"`) {
+ key = strings.TrimPrefix(strings.TrimSuffix(key, `"`), `"`)
+ }
+ if strings.HasPrefix(val, `"`) && strings.HasSuffix(val, `"`) {
+ val = strings.TrimPrefix(strings.TrimSuffix(val, `"`), `"`)
+ }
+ config.Env = append(config.Env, fmt.Sprintf("%s=%s", key, val))
+ case "ENTRYPOINT":
+ // Two valid forms.
+ // First, JSON array.
+ // Second, not a JSON array - we interpret this as an
+ // argument to `sh -c`, unless empty, in which case we
+ // just use a blank entrypoint.
+ testUnmarshal := []string{}
+ if err := json.Unmarshal([]byte(value), &testUnmarshal); err != nil {
+ // It ain't valid JSON, so assume it's an
+ // argument to sh -c if not empty.
+ if value != "" {
+ config.Entrypoint = []string{"/bin/sh", "-c", value}
+ } else {
+ config.Entrypoint = []string{}
+ }
+ } else {
+ // Valid JSON
+ config.Entrypoint = testUnmarshal
+ }
+ case "CMD":
+ // Same valid forms as entrypoint.
+ // However, where ENTRYPOINT assumes that 'ENTRYPOINT '
+ // means no entrypoint, CMD assumes it is 'sh -c' with
+ // no third argument.
+ testUnmarshal := []string{}
+ if err := json.Unmarshal([]byte(value), &testUnmarshal); err != nil {
+ // It ain't valid JSON, so assume it's an
+ // argument to sh -c.
+ // Only include volume if it's not ""
+ config.Cmd = []string{"/bin/sh", "-c"}
+ if value != "" {
+ config.Cmd = append(config.Cmd, value)
+ }
+ } else {
+ // Valid JSON
+ config.Cmd = testUnmarshal
+ }
+ case "VOLUME":
+ // Either a JSON array or a set of space-separated
+ // paths.
+ // Acts rather similar to ENTRYPOINT and CMD, but always
+ // appends rather than replacing, and no sh -c prepend.
+ testUnmarshal := []string{}
+ if err := json.Unmarshal([]byte(value), &testUnmarshal); err != nil {
+ // Not valid JSON, so split on spaces
+ testUnmarshal = strings.Split(value, " ")
+ }
+ if len(testUnmarshal) == 0 {
+ return nil, errors.Errorf("invalid change %q - must provide at least one argument to VOLUME", change)
+ }
+ for _, vol := range testUnmarshal {
+ if vol == "" {
+ return nil, errors.Errorf("invalid change %q - VOLUME paths must not be empty", change)
+ }
+ if config.Volumes == nil {
+ config.Volumes = make(map[string]struct{})
+ }
+ config.Volumes[vol] = struct{}{}
+ }
+ case "WORKDIR":
+ // This can be passed multiple times.
+ // Each successive invocation is treated as relative to
+ // the previous one - so WORKDIR /A, WORKDIR b,
+ // WORKDIR c results in /A/b/c
+ // Just need to check it's not empty...
+ if value == "" {
+ return nil, errors.Errorf("invalid change %q - must provide a non-empty WORKDIR", change)
+ }
+ config.WorkingDir = filepath.Join(config.WorkingDir, value)
+ case "LABEL":
+ // Same general idea as ENV, but we no longer allow " "
+ // as a separator.
+ // We didn't do that for ENV either, so nice and easy.
+ // Potentially problematic: LABEL might theoretically
+ // allow an = in the key? If people really do this, we
+ // may need to investigate more advanced parsing.
+ var (
+ key, val string
+ )
+
+ splitLabel := strings.SplitN(value, "=", 2)
+ // Unlike ENV, LABEL must have a value
+ if len(splitLabel) != 2 {
+ return nil, errors.Errorf("invalid change %q - LABEL must be formatted key=value", change)
+ }
+ key = splitLabel[0]
+ val = splitLabel[1]
+
+ if strings.HasPrefix(key, `"`) && strings.HasSuffix(key, `"`) {
+ key = strings.TrimPrefix(strings.TrimSuffix(key, `"`), `"`)
+ }
+ if strings.HasPrefix(val, `"`) && strings.HasSuffix(val, `"`) {
+ val = strings.TrimPrefix(strings.TrimSuffix(val, `"`), `"`)
+ }
+ // Check key after we strip quotations
+ if key == "" {
+ return nil, errors.Errorf("invalid change %q - LABEL must have a non-empty key", change)
+ }
+ if config.Labels == nil {
+ config.Labels = make(map[string]string)
+ }
+ config.Labels[key] = val
+ case "STOPSIGNAL":
+ // Check the provided signal for validity.
+ killSignal, err := signal.ParseSignal(value)
+ if err != nil {
+ return nil, errors.Wrapf(err, "invalid change %q - KILLSIGNAL must be given a valid signal", change)
+ }
+ config.StopSignal = fmt.Sprintf("%d", killSignal)
+ case "ONBUILD":
+ // Onbuild always appends.
+ if value == "" {
+ return nil, errors.Errorf("invalid change %q - ONBUILD must be given an argument", change)
+ }
+ config.OnBuild = append(config.OnBuild, value)
+ default:
+ return nil, errors.Errorf("invalid change %q - invalid instruction %s", change, outerKey)
+ }
+ }
+
+ return config, nil
+}
diff --git a/vendor/github.com/containers/common/libimage/image_tree.go b/vendor/github.com/containers/common/libimage/image_tree.go
new file mode 100644
index 000000000..6583a7007
--- /dev/null
+++ b/vendor/github.com/containers/common/libimage/image_tree.go
@@ -0,0 +1,96 @@
+package libimage
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/disiqueira/gotree/v3"
+ "github.com/docker/go-units"
+)
+
+// Tree generates a tree for the specified image and its layers. Use
+// `traverseChildren` to traverse the layers of all children. By default, only
+// layers of the image are printed.
+func (i *Image) Tree(traverseChildren bool) (string, error) {
+ // NOTE: a string builder prevents us from copying to much data around
+ // and compile the string when and where needed.
+ sb := &strings.Builder{}
+
+ // First print the pretty header for the target image.
+ size, err := i.Size()
+ if err != nil {
+ return "", err
+ }
+ repoTags, err := i.RepoTags()
+ if err != nil {
+ return "", err
+ }
+
+ fmt.Fprintf(sb, "Image ID: %s\n", i.ID()[:12])
+ fmt.Fprintf(sb, "Tags: %s\n", repoTags)
+ fmt.Fprintf(sb, "Size: %v\n", units.HumanSizeWithPrecision(float64(size), 4))
+ if i.TopLayer() != "" {
+ fmt.Fprintf(sb, "Image Layers")
+ } else {
+ fmt.Fprintf(sb, "No Image Layers")
+ }
+
+ tree := gotree.New(sb.String())
+
+ layerTree, err := i.runtime.layerTree()
+ if err != nil {
+ return "", err
+ }
+
+ imageNode := layerTree.node(i.TopLayer())
+
+ // Traverse the entire tree down to all children.
+ if traverseChildren {
+ if err := imageTreeTraverseChildren(imageNode, tree); err != nil {
+ return "", err
+ }
+ } else {
+ // Walk all layers of the image and assemlbe their data.
+ for parentNode := imageNode; parentNode != nil; parentNode = parentNode.parent {
+ if parentNode.layer == nil {
+ break // we're done
+ }
+ var tags string
+ repoTags, err := parentNode.repoTags()
+ if err != nil {
+ return "", err
+ }
+ if len(repoTags) > 0 {
+ tags = fmt.Sprintf(" Top Layer of: %s", repoTags)
+ }
+ tree.Add(fmt.Sprintf("ID: %s Size: %7v%s", parentNode.layer.ID[:12], units.HumanSizeWithPrecision(float64(parentNode.layer.UncompressedSize), 4), tags))
+ }
+ }
+
+ return tree.Print(), nil
+}
+
+func imageTreeTraverseChildren(node *layerNode, parent gotree.Tree) error {
+ var tags string
+ repoTags, err := node.repoTags()
+ if err != nil {
+ return err
+ }
+ if len(repoTags) > 0 {
+ tags = fmt.Sprintf(" Top Layer of: %s", repoTags)
+ }
+
+ newNode := parent.Add(fmt.Sprintf("ID: %s Size: %7v%s", node.layer.ID[:12], units.HumanSizeWithPrecision(float64(node.layer.UncompressedSize), 4), tags))
+
+ if len(node.children) <= 1 {
+ newNode = parent
+ }
+ for i := range node.children {
+ child := node.children[i]
+ if err := imageTreeTraverseChildren(child, newNode); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/containers/common/libimage/import.go b/vendor/github.com/containers/common/libimage/import.go
new file mode 100644
index 000000000..4cce4c9ca
--- /dev/null
+++ b/vendor/github.com/containers/common/libimage/import.go
@@ -0,0 +1,108 @@
+package libimage
+
+import (
+ "context"
+ "net/url"
+ "os"
+
+ storageTransport "github.com/containers/image/v5/storage"
+ tarballTransport "github.com/containers/image/v5/tarball"
+ v1 "github.com/opencontainers/image-spec/specs-go/v1"
+ "github.com/pkg/errors"
+ "github.com/sirupsen/logrus"
+)
+
+// ImportOptions allow for customizing image imports.
+type ImportOptions struct {
+ CopyOptions
+
+ // Apply the specified changes to the created image. Please refer to
+ // `ImageConfigFromChanges` for supported change instructions.
+ Changes []string
+ // Set the commit message as a comment to created image's history.
+ CommitMessage string
+ // Tag the imported image with this value.
+ Tag string
+}
+
+// Import imports a custom tarball at the specified path. Returns the name of
+// the imported image.
+func (r *Runtime) Import(ctx context.Context, path string, options *ImportOptions) (string, error) {
+ logrus.Debugf("Importing image from %q", path)
+
+ if options == nil {
+ options = &ImportOptions{}
+ }
+
+ ic := v1.ImageConfig{}
+ if len(options.Changes) > 0 {
+ config, err := ImageConfigFromChanges(options.Changes)
+ if err != nil {
+ return "", err
+ }
+ ic = config.ImageConfig
+ }
+
+ hist := []v1.History{
+ {Comment: options.CommitMessage},
+ }
+
+ config := v1.Image{
+ Config: ic,
+ History: hist,
+ }
+
+ u, err := url.ParseRequestURI(path)
+ if err == nil && u.Scheme != "" {
+ // If source is a URL, download the file.
+ file, err := r.downloadFromURL(path)
+ if err != nil {
+ return "", err
+ }
+ defer os.Remove(file)
+ path = file
+ } else if path == "-" {
+ // "-" special cases stdin
+ path = os.Stdin.Name()
+ }
+
+ srcRef, err := tarballTransport.Transport.ParseReference(path)
+ if err != nil {
+ return "", err
+ }
+
+ updater, ok := srcRef.(tarballTransport.ConfigUpdater)
+ if !ok {
+ return "", errors.New("unexpected type, a tarball reference should implement tarball.ConfigUpdater")
+ }
+ annotations := make(map[string]string)
+ if err := updater.ConfigUpdate(config, annotations); err != nil {
+ return "", err
+ }
+
+ name := options.Tag
+ if name == "" {
+ name, err = getImageDigest(ctx, srcRef, r.systemContextCopy())
+ if err != nil {
+ return "", err
+ }
+ name = "sha256:" + name[1:] // strip leading "@"
+ }
+
+ destRef, err := storageTransport.Transport.ParseStoreReference(r.store, name)
+ if err != nil {
+ return "", err
+ }
+
+ c, err := r.newCopier(&options.CopyOptions)
+ if err != nil {
+ return "", err
+ }
+ defer c.close()
+
+ if _, err := c.copy(ctx, srcRef, destRef); err != nil {
+ return "", err
+ }
+
+ return name, nil
+}
diff --git a/vendor/github.com/containers/common/libimage/inspect.go b/vendor/github.com/containers/common/libimage/inspect.go
new file mode 100644
index 000000000..349709155
--- /dev/null
+++ b/vendor/github.com/containers/common/libimage/inspect.go
@@ -0,0 +1,206 @@
+package libimage
+
+import (
+ "context"
+ "encoding/json"
+ "time"
+
+ "github.com/containers/image/v5/manifest"
+ "github.com/containers/image/v5/types"
+ "github.com/opencontainers/go-digest"
+ ociv1 "github.com/opencontainers/image-spec/specs-go/v1"
+ "github.com/sirupsen/logrus"
+)
+
+// ImageData contains the inspected data of an image.
+type ImageData struct {
+ ID string `json:"Id"`
+ Digest digest.Digest `json:"Digest"`
+ RepoTags []string `json:"RepoTags"`
+ RepoDigests []string `json:"RepoDigests"`
+ Parent string `json:"Parent"`
+ Comment string `json:"Comment"`
+ Created *time.Time `json:"Created"`
+ Config *ociv1.ImageConfig `json:"Config"`
+ Version string `json:"Version"`
+ Author string `json:"Author"`
+ Architecture string `json:"Architecture"`
+ Os string `json:"Os"`
+ Size int64 `json:"Size"`
+ VirtualSize int64 `json:"VirtualSize"`
+ GraphDriver *DriverData `json:"GraphDriver"`
+ RootFS *RootFS `json:"RootFS"`
+ Labels map[string]string `json:"Labels"`
+ Annotations map[string]string `json:"Annotations"`
+ ManifestType string `json:"ManifestType"`
+ User string `json:"User"`
+ History []ociv1.History `json:"History"`
+ NamesHistory []string `json:"NamesHistory"`
+ HealthCheck *manifest.Schema2HealthConfig `json:"Healthcheck,omitempty"`
+}
+
+// DriverData includes data on the storage driver of the image.
+type DriverData struct {
+ Name string `json:"Name"`
+ Data map[string]string `json:"Data"`
+}
+
+// RootFS includes data on the root filesystem of the image.
+type RootFS struct {
+ Type string `json:"Type"`
+ Layers []digest.Digest `json:"Layers"`
+}
+
+// Inspect inspects the image. Use `withSize` to also perform the
+// comparatively expensive size computation of the image.
+func (i *Image) Inspect(ctx context.Context, withSize bool) (*ImageData, error) {
+ logrus.Debugf("Inspecting image %s", i.ID())
+
+ if i.cached.completeInspectData != nil {
+ if withSize && i.cached.completeInspectData.Size == int64(-1) {
+ size, err := i.Size()
+ if err != nil {
+ return nil, err
+ }
+ i.cached.completeInspectData.Size = size
+ }
+ return i.cached.completeInspectData, nil
+ }
+
+ // First assemble data that does not depend on the format of the image.
+ info, err := i.inspectInfo(ctx)
+ if err != nil {
+ return nil, err
+ }
+ ociImage, err := i.toOCI(ctx)
+ if err != nil {
+ return nil, err
+ }
+ parentImage, err := i.Parent(ctx)
+ if err != nil {
+ return nil, err
+ }
+ repoTags, err := i.RepoTags()
+ if err != nil {
+ return nil, err
+ }
+ repoDigests, err := i.RepoDigests()
+ if err != nil {
+ return nil, err
+ }
+ driverData, err := i.driverData()
+ if err != nil {
+ return nil, err
+ }
+
+ size := int64(-1)
+ if withSize {
+ size, err = i.Size()
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ data := &ImageData{
+ ID: i.ID(),
+ RepoTags: repoTags,
+ RepoDigests: repoDigests,
+ Created: ociImage.Created,
+ Author: ociImage.Author,
+ Architecture: ociImage.Architecture,
+ Os: ociImage.OS,
+ Config: &ociImage.Config,
+ Version: info.DockerVersion,
+ Size: size,
+ VirtualSize: size, // TODO: they should be different (inherited from Podman)
+ Digest: i.Digest(),
+ Labels: info.Labels,
+ RootFS: &RootFS{
+ Type: ociImage.RootFS.Type,
+ Layers: ociImage.RootFS.DiffIDs,
+ },
+ GraphDriver: driverData,
+ User: ociImage.Config.User,
+ History: ociImage.History,
+ NamesHistory: i.NamesHistory(),
+ }
+
+ if parentImage != nil {
+ data.Parent = parentImage.ID()
+ }
+
+ // Determine the format of the image. How we determine certain data
+ // depends on the format (e.g., Docker v2s2, OCI v1).
+ src, err := i.source(ctx)
+ if err != nil {
+ return nil, err
+ }
+ manifestRaw, manifestType, err := src.GetManifest(ctx, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ data.ManifestType = manifestType
+
+ switch manifestType {
+ // OCI image
+ case ociv1.MediaTypeImageManifest:
+ var ociManifest ociv1.Manifest
+ if err := json.Unmarshal(manifestRaw, &ociManifest); err != nil {
+ return nil, err
+ }
+ data.Annotations = ociManifest.Annotations
+ if len(ociImage.History) > 0 {
+ data.Comment = ociImage.History[0].Comment
+ }
+
+ // Docker image
+ case manifest.DockerV2Schema1MediaType, manifest.DockerV2Schema2MediaType:
+ rawConfig, err := i.rawConfigBlob(ctx)
+ if err != nil {
+ return nil, err
+ }
+ var dockerManifest manifest.Schema2V1Image
+ if err := json.Unmarshal(rawConfig, &dockerManifest); err != nil {
+ return nil, err
+ }
+ data.Comment = dockerManifest.Comment
+ data.HealthCheck = dockerManifest.ContainerConfig.Healthcheck
+ }
+
+ if data.Annotations == nil {
+ // Podman compat
+ data.Annotations = make(map[string]string)
+ }
+
+ i.cached.completeInspectData = data
+
+ return data, nil
+}
+
+// inspectInfo returns the image inspect info.
+func (i *Image) inspectInfo(ctx context.Context) (*types.ImageInspectInfo, error) {
+ if i.cached.partialInspectData != nil {
+ return i.cached.partialInspectData, nil
+ }
+
+ ref, err := i.StorageReference()
+ if err != nil {
+
+ return nil, err
+ }
+
+ img, err := ref.NewImage(ctx, i.runtime.systemContextCopy())
+ if err != nil {
+ return nil, err
+ }
+ defer img.Close()
+
+ data, err := img.Inspect(ctx)
+ if err != nil {
+ return nil, err
+ }
+
+ i.cached.partialInspectData = data
+ return data, nil
+}
diff --git a/vendor/github.com/containers/common/libimage/layer_tree.go b/vendor/github.com/containers/common/libimage/layer_tree.go
new file mode 100644
index 000000000..7e0940339
--- /dev/null
+++ b/vendor/github.com/containers/common/libimage/layer_tree.go
@@ -0,0 +1,249 @@
+package libimage
+
+import (
+ "context"
+
+ "github.com/containers/storage"
+ ociv1 "github.com/opencontainers/image-spec/specs-go/v1"
+ "github.com/sirupsen/logrus"
+)
+
+// layerTree is an internal representation of local layers.
+type layerTree struct {
+ // nodes is the actual layer tree with layer IDs being keys.
+ nodes map[string]*layerNode
+ // ociCache is a cache for Image.ID -> OCI Image. Translations are done
+ // on-demand.
+ ociCache map[string]*ociv1.Image
+}
+
+// node returns a layerNode for the specified layerID.
+func (t *layerTree) node(layerID string) *layerNode {
+ node, exists := t.nodes[layerID]
+ if !exists {
+ node = &layerNode{}
+ t.nodes[layerID] = node
+ }
+ return node
+}
+
+// toOCI returns an OCI image for the specified image.
+func (t *layerTree) toOCI(ctx context.Context, i *Image) (*ociv1.Image, error) {
+ var err error
+ oci, exists := t.ociCache[i.ID()]
+ if !exists {
+ oci, err = i.toOCI(ctx)
+ if err == nil {
+ t.ociCache[i.ID()] = oci
+ }
+ }
+ return oci, err
+}
+
+// layerNode is a node in a layerTree. It's ID is the key in a layerTree.
+type layerNode struct {
+ children []*layerNode
+ images []*Image
+ parent *layerNode
+ layer *storage.Layer
+}
+
+// repoTags assemble all repo tags all of images of the layer node.
+func (l *layerNode) repoTags() ([]string, error) {
+ orderedTags := []string{}
+ visitedTags := make(map[string]bool)
+
+ for _, image := range l.images {
+ repoTags, err := image.RepoTags()
+ if err != nil {
+ return nil, err
+ }
+ for _, tag := range repoTags {
+ if _, visted := visitedTags[tag]; visted {
+ continue
+ }
+ visitedTags[tag] = true
+ orderedTags = append(orderedTags, tag)
+ }
+ }
+
+ return orderedTags, nil
+}
+
+// layerTree extracts a layerTree from the layers in the local storage and
+// relates them to the specified images.
+func (r *Runtime) layerTree() (*layerTree, error) {
+ layers, err := r.store.Layers()
+ if err != nil {
+ return nil, err
+ }
+
+ images, err := r.ListImages(context.Background(), nil, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ tree := layerTree{
+ nodes: make(map[string]*layerNode),
+ ociCache: make(map[string]*ociv1.Image),
+ }
+
+ // First build a tree purely based on layer information.
+ for i := range layers {
+ node := tree.node(layers[i].ID)
+ node.layer = &layers[i]
+ if layers[i].Parent == "" {
+ continue
+ }
+ parent := tree.node(layers[i].Parent)
+ node.parent = parent
+ parent.children = append(parent.children, node)
+ }
+
+ // Now assign the images to each (top) layer.
+ for i := range images {
+ img := images[i] // do not leak loop variable outside the scope
+ topLayer := img.TopLayer()
+ if topLayer == "" {
+ continue
+ }
+ node, exists := tree.nodes[topLayer]
+ if !exists {
+ // Note: erroring out in this case has turned out having been a
+ // mistake. Users may not be able to recover, so we're now
+ // throwing a warning to guide them to resolve the issue and
+ // turn the errors non-fatal.
+ logrus.Warnf("Top layer %s of image %s not found in layer tree. The storage may be corrupted, consider running `podman system reset`.", topLayer, img.ID())
+ continue
+ }
+ node.images = append(node.images, img)
+ }
+
+ return &tree, nil
+}
+
+// children returns the child images of parent. Child images are images with
+// either the same top layer as parent or parent being the true parent layer.
+// Furthermore, the history of the parent and child images must match with the
+// parent having one history item less. If all is true, all images are
+// returned. Otherwise, the first image is returned.
+func (t *layerTree) children(ctx context.Context, parent *Image, all bool) ([]*Image, error) {
+ if parent.TopLayer() == "" {
+ return nil, nil
+ }
+
+ var children []*Image
+
+ parentNode, exists := t.nodes[parent.TopLayer()]
+ if !exists {
+ // Note: erroring out in this case has turned out having been a
+ // mistake. Users may not be able to recover, so we're now
+ // throwing a warning to guide them to resolve the issue and
+ // turn the errors non-fatal.
+ logrus.Warnf("Layer %s not found in layer tree. The storage may be corrupted, consider running `podman system reset`.", parent.TopLayer())
+ return children, nil
+ }
+
+ parentID := parent.ID()
+ parentOCI, err := t.toOCI(ctx, parent)
+ if err != nil {
+ return nil, err
+ }
+
+ // checkParent returns true if child and parent are in such a relation.
+ checkParent := func(child *Image) (bool, error) {
+ if parentID == child.ID() {
+ return false, nil
+ }
+ childOCI, err := t.toOCI(ctx, child)
+ if err != nil {
+ return false, err
+ }
+ // History check.
+ return areParentAndChild(parentOCI, childOCI), nil
+ }
+
+ // addChildrenFrom adds child images of parent to children. Returns
+ // true if any image is a child of parent.
+ addChildrenFromNode := func(node *layerNode) (bool, error) {
+ foundChildren := false
+ for i, childImage := range node.images {
+ isChild, err := checkParent(childImage)
+ if err != nil {
+ return foundChildren, err
+ }
+ if isChild {
+ foundChildren = true
+ children = append(children, node.images[i])
+ if all {
+ return foundChildren, nil
+ }
+ }
+ }
+ return foundChildren, nil
+ }
+
+ // First check images where parent's top layer is also the parent
+ // layer.
+ for _, childNode := range parentNode.children {
+ found, err := addChildrenFromNode(childNode)
+ if err != nil {
+ return nil, err
+ }
+ if found && all {
+ return children, nil
+ }
+ }
+
+ // Now check images with the same top layer.
+ if _, err := addChildrenFromNode(parentNode); err != nil {
+ return nil, err
+ }
+
+ return children, nil
+}
+
+// parent returns the parent image or nil if no parent image could be found.
+func (t *layerTree) parent(ctx context.Context, child *Image) (*Image, error) {
+ if child.TopLayer() == "" {
+ return nil, nil
+ }
+
+ node, exists := t.nodes[child.TopLayer()]
+ if !exists {
+ // Note: erroring out in this case has turned out having been a
+ // mistake. Users may not be able to recover, so we're now
+ // throwing a warning to guide them to resolve the issue and
+ // turn the errors non-fatal.
+ logrus.Warnf("Layer %s not found in layer tree. The storage may be corrupted, consider running `podman system reset`.", child.TopLayer())
+ return nil, nil
+ }
+
+ childOCI, err := t.toOCI(ctx, child)
+ if err != nil {
+ return nil, err
+ }
+
+ // Check images from the parent node (i.e., parent layer) and images
+ // with the same layer (i.e., same top layer).
+ childID := child.ID()
+ images := node.images
+ if node.parent != nil {
+ images = append(images, node.parent.images...)
+ }
+ for _, parent := range images {
+ if parent.ID() == childID {
+ continue
+ }
+ parentOCI, err := t.toOCI(ctx, parent)
+ if err != nil {
+ return nil, err
+ }
+ // History check.
+ if areParentAndChild(parentOCI, childOCI) {
+ return parent, nil
+ }
+ }
+
+ return nil, nil
+}
diff --git a/vendor/github.com/containers/common/libimage/load.go b/vendor/github.com/containers/common/libimage/load.go
new file mode 100644
index 000000000..c606aca5b
--- /dev/null
+++ b/vendor/github.com/containers/common/libimage/load.go
@@ -0,0 +1,125 @@
+package libimage
+
+import (
+ "context"
+ "errors"
+ "os"
+
+ dirTransport "github.com/containers/image/v5/directory"
+ dockerArchiveTransport "github.com/containers/image/v5/docker/archive"
+ ociArchiveTransport "github.com/containers/image/v5/oci/archive"
+ ociTransport "github.com/containers/image/v5/oci/layout"
+ "github.com/containers/image/v5/types"
+ "github.com/sirupsen/logrus"
+)
+
+type LoadOptions struct {
+ CopyOptions
+}
+
+// Load loads one or more images (depending on the transport) from the
+// specified path. The path may point to an image the following transports:
+// oci, oci-archive, dir, docker-archive.
+func (r *Runtime) Load(ctx context.Context, path string, options *LoadOptions) ([]string, error) {
+ logrus.Debugf("Loading image from %q", path)
+
+ var (
+ loadedImages []string
+ loadError error
+ )
+
+ if options == nil {
+ options = &LoadOptions{}
+ }
+
+ for _, f := range []func() ([]string, error){
+ // OCI
+ func() ([]string, error) {
+ logrus.Debugf("-> Attempting to load %q as an OCI directory", path)
+ ref, err := ociTransport.NewReference(path, "")
+ if err != nil {
+ return nil, err
+ }
+ return r.copyFromDefault(ctx, ref, &options.CopyOptions)
+ },
+
+ // OCI-ARCHIVE
+ func() ([]string, error) {
+ logrus.Debugf("-> Attempting to load %q as an OCI archive", path)
+ ref, err := ociArchiveTransport.NewReference(path, "")
+ if err != nil {
+ return nil, err
+ }
+ return r.copyFromDefault(ctx, ref, &options.CopyOptions)
+ },
+
+ // DIR
+ func() ([]string, error) {
+ logrus.Debugf("-> Attempting to load %q as a Docker dir", path)
+ ref, err := dirTransport.NewReference(path)
+ if err != nil {
+ return nil, err
+ }
+ return r.copyFromDefault(ctx, ref, &options.CopyOptions)
+ },
+
+ // DOCKER-ARCHIVE
+ func() ([]string, error) {
+ logrus.Debugf("-> Attempting to load %q as a Docker archive", path)
+ ref, err := dockerArchiveTransport.ParseReference(path)
+ if err != nil {
+ return nil, err
+ }
+ return r.loadMultiImageDockerArchive(ctx, ref, &options.CopyOptions)
+ },
+
+ // Give a decent error message if nothing above worked.
+ func() ([]string, error) {
+ return nil, errors.New("payload does not match any of the supported image formats (oci, oci-archive, dir, docker-archive)")
+ },
+ } {
+ loadedImages, loadError = f()
+ if loadError == nil {
+ return loadedImages, loadError
+ }
+ logrus.Debugf("Error loading %s: %v", path, loadError)
+ }
+
+ return nil, loadError
+}
+
+// loadMultiImageDockerArchive loads the docker archive specified by ref. In
+// case the path@reference notation was used, only the specifiec image will be
+// loaded. Otherwise, all images will be loaded.
+func (r *Runtime) loadMultiImageDockerArchive(ctx context.Context, ref types.ImageReference, options *CopyOptions) ([]string, error) {
+ // If we cannot stat the path, it either does not exist OR the correct
+ // syntax to reference an image within the archive was used, so we
+ // should.
+ path := ref.StringWithinTransport()
+ if _, err := os.Stat(path); err != nil {
+ return r.copyFromDockerArchive(ctx, ref, options)
+ }
+
+ reader, err := dockerArchiveTransport.NewReader(r.systemContextCopy(), path)
+ if err != nil {
+ return nil, err
+ }
+
+ refLists, err := reader.List()
+ if err != nil {
+ return nil, err
+ }
+
+ var copiedImages []string
+ for _, list := range refLists {
+ for _, listRef := range list {
+ names, err := r.copyFromDockerArchiveReaderReference(ctx, reader, listRef, options)
+ if err != nil {
+ return nil, err
+ }
+ copiedImages = append(copiedImages, names...)
+ }
+ }
+
+ return copiedImages, nil
+}
diff --git a/vendor/github.com/containers/common/libimage/manifest_list.go b/vendor/github.com/containers/common/libimage/manifest_list.go
new file mode 100644
index 000000000..72a2cf55f
--- /dev/null
+++ b/vendor/github.com/containers/common/libimage/manifest_list.go
@@ -0,0 +1,389 @@
+package libimage
+
+import (
+ "context"
+ "fmt"
+
+ "github.com/containers/common/libimage/manifests"
+ imageCopy "github.com/containers/image/v5/copy"
+ "github.com/containers/image/v5/docker"
+ "github.com/containers/image/v5/manifest"
+ "github.com/containers/image/v5/transports/alltransports"
+ "github.com/containers/image/v5/types"
+ "github.com/containers/storage"
+ "github.com/opencontainers/go-digest"
+ "github.com/pkg/errors"
+)
+
+// NOTE: the abstractions and APIs here are a first step to further merge
+// `libimage/manifests` into `libimage`.
+
+// ManifestList represents a manifest list (Docker) or an image index (OCI) in
+// the local containers storage.
+type ManifestList struct {
+ // NOTE: the *List* suffix is intentional as the term "manifest" is
+ // used ambiguously across the ecosystem. It may refer to the (JSON)
+ // manifest of an ordinary image OR to a manifest *list* (Docker) or to
+ // image index (OCI).
+ // It's a bit more work when typing but without ambiguity.
+
+ // The underlying image in the containers storage.
+ image *Image
+
+ // The underlying manifest list.
+ list manifests.List
+}
+
+// ID returns the ID of the manifest list.
+func (m *ManifestList) ID() string {
+ return m.image.ID()
+}
+
+// CreateManifestList creates a new empty manifest list with the specified
+// name.
+func (r *Runtime) CreateManifestList(name string) (*ManifestList, error) {
+ normalized, err := NormalizeName(name)
+ if err != nil {
+ return nil, err
+ }
+
+ list := manifests.Create()
+ listID, err := list.SaveToImage(r.store, "", []string{normalized.String()}, manifest.DockerV2ListMediaType)
+ if err != nil {
+ return nil, err
+ }
+
+ mList, err := r.LookupManifestList(listID)
+ if err != nil {
+ return nil, err
+ }
+
+ return mList, nil
+}
+
+// LookupManifestList looks up a manifest list with the specified name in the
+// containers storage.
+func (r *Runtime) LookupManifestList(name string) (*ManifestList, error) {
+ image, list, err := r.lookupManifestList(name)
+ if err != nil {
+ return nil, err
+ }
+ return &ManifestList{image: image, list: list}, nil
+}
+
+func (r *Runtime) lookupManifestList(name string) (*Image, manifests.List, error) {
+ image, _, err := r.LookupImage(name, &LookupImageOptions{IgnorePlatform: true})
+ if err != nil {
+ return nil, nil, err
+ }
+ if err := image.reload(); err != nil {
+ return nil, nil, err
+ }
+ list, err := image.getManifestList()
+ if err != nil {
+ return nil, nil, err
+ }
+ return image, list, nil
+}
+
+// ToManifestList converts the image into a manifest list. An error is
+// returned if the image is not a manifest list.
+func (i *Image) ToManifestList() (*ManifestList, error) {
+ list, err := i.getManifestList()
+ if err != nil {
+ return nil, err
+ }
+ return &ManifestList{image: i, list: list}, nil
+}
+
+// LookupInstance looks up an instance of the manifest list matching the
+// specified platform. The local machine's platform is used if left empty.
+func (m *ManifestList) LookupInstance(ctx context.Context, architecture, os, variant string) (*Image, error) {
+ sys := m.image.runtime.systemContextCopy()
+ if architecture != "" {
+ sys.ArchitectureChoice = architecture
+ }
+ if os != "" {
+ sys.OSChoice = os
+ }
+ if variant != "" {
+ sys.VariantChoice = variant
+ }
+
+ // Now look at the *manifest* and select a matching instance.
+ rawManifest, manifestType, err := m.image.Manifest(ctx)
+ if err != nil {
+ return nil, err
+ }
+ list, err := manifest.ListFromBlob(rawManifest, manifestType)
+ if err != nil {
+ return nil, err
+ }
+ instanceDigest, err := list.ChooseInstance(sys)
+ if err != nil {
+ return nil, err
+ }
+
+ allImages, err := m.image.runtime.ListImages(ctx, nil, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ for _, image := range allImages {
+ for _, imageDigest := range append(image.Digests(), image.Digest()) {
+ if imageDigest == instanceDigest {
+ return image, nil
+ }
+ }
+ }
+
+ return nil, errors.Wrapf(storage.ErrImageUnknown, "could not find image instance %s of manifest list %s in local containers storage", instanceDigest, m.ID())
+}
+
+// saveAndReload saves the manifest list and reloads it from storage with the new ID.
+func (m *ManifestList) saveAndReload() error {
+ newID, err := m.list.SaveToImage(m.image.runtime.store, m.image.ID(), nil, "")
+ if err != nil {
+ return err
+ }
+
+ // Make sure to reload the image from the containers storage to fetch
+ // the latest data (e.g., new or delete digests).
+ if err := m.image.reload(); err != nil {
+ return err
+ }
+ image, list, err := m.image.runtime.lookupManifestList(newID)
+ if err != nil {
+ return err
+ }
+ m.image = image
+ m.list = list
+ return nil
+}
+
+// getManifestList is a helper to obtain a manifest list
+func (i *Image) getManifestList() (manifests.List, error) {
+ _, list, err := manifests.LoadFromImage(i.runtime.store, i.ID())
+ return list, err
+}
+
+// IsManifestList returns true if the image is a manifest list (Docker) or an
+// image index (OCI). This information may be critical to make certain
+// execution paths more robust (e.g., suppress certain errors).
+func (i *Image) IsManifestList(ctx context.Context) (bool, error) {
+ ref, err := i.StorageReference()
+ if err != nil {
+ return false, err
+ }
+ imgRef, err := ref.NewImageSource(ctx, i.runtime.systemContextCopy())
+ if err != nil {
+ return false, err
+ }
+ _, manifestType, err := imgRef.GetManifest(ctx, nil)
+ if err != nil {
+ return false, err
+ }
+ return manifest.MIMETypeIsMultiImage(manifestType), nil
+}
+
+// Inspect returns a dockerized version of the manifest list.
+func (m *ManifestList) Inspect() (*manifest.Schema2List, error) {
+ return m.list.Docker(), nil
+}
+
+// Options for adding a manifest list.
+type ManifestListAddOptions struct {
+ // Add all images to the list if the to-be-added image itself is a
+ // manifest list.
+ All bool `json:"all"`
+ // containers-auth.json(5) file to use when authenticating against
+ // container registries.
+ AuthFilePath string
+ // Path to the certificates directory.
+ CertDirPath string
+ // Allow contacting registries over HTTP, or HTTPS with failed TLS
+ // verification. Note that this does not affect other TLS connections.
+ InsecureSkipTLSVerify types.OptionalBool
+ // Username to use when authenticating at a container registry.
+ Username string
+ // Password to use when authenticating at a container registry.
+ Password string
+}
+
+// Add adds one or more manifests to the manifest list and returns the digest
+// of the added instance.
+func (m *ManifestList) Add(ctx context.Context, name string, options *ManifestListAddOptions) (digest.Digest, error) {
+ if options == nil {
+ options = &ManifestListAddOptions{}
+ }
+
+ ref, err := alltransports.ParseImageName(name)
+ if err != nil {
+ withDocker := fmt.Sprintf("%s://%s", docker.Transport.Name(), name)
+ ref, err = alltransports.ParseImageName(withDocker)
+ if err != nil {
+ return "", err
+ }
+ }
+
+ // Now massage in the copy-related options into the system context.
+ systemContext := m.image.runtime.systemContextCopy()
+ if options.AuthFilePath != "" {
+ systemContext.AuthFilePath = options.AuthFilePath
+ }
+ if options.CertDirPath != "" {
+ systemContext.DockerCertPath = options.CertDirPath
+ }
+ if options.InsecureSkipTLSVerify != types.OptionalBoolUndefined {
+ systemContext.DockerInsecureSkipTLSVerify = options.InsecureSkipTLSVerify
+ systemContext.OCIInsecureSkipTLSVerify = options.InsecureSkipTLSVerify == types.OptionalBoolTrue
+ systemContext.DockerDaemonInsecureSkipTLSVerify = options.InsecureSkipTLSVerify == types.OptionalBoolTrue
+ }
+ if options.Username != "" {
+ systemContext.DockerAuthConfig = &types.DockerAuthConfig{
+ Username: options.Username,
+ Password: options.Password,
+ }
+ }
+
+ newDigest, err := m.list.Add(ctx, systemContext, ref, options.All)
+ if err != nil {
+ return "", err
+ }
+
+ // Write the changes to disk.
+ if err := m.saveAndReload(); err != nil {
+ return "", err
+ }
+ return newDigest, nil
+}
+
+// Options for annotating a manifest list.
+type ManifestListAnnotateOptions struct {
+ // Add the specified annotations to the added image.
+ Annotations map[string]string
+ // Add the specified architecture to the added image.
+ Architecture string
+ // Add the specified features to the added image.
+ Features []string
+ // Add the specified OS to the added image.
+ OS string
+ // Add the specified OS features to the added image.
+ OSFeatures []string
+ // Add the specified OS version to the added image.
+ OSVersion string
+ // Add the specified variant to the added image.
+ Variant string
+}
+
+// Annotate an image instance specified by `d` in the manifest list.
+func (m *ManifestList) AnnotateInstance(d digest.Digest, options *ManifestListAnnotateOptions) error {
+ if options == nil {
+ return nil
+ }
+
+ if len(options.OS) > 0 {
+ if err := m.list.SetOS(d, options.OS); err != nil {
+ return err
+ }
+ }
+ if len(options.OSVersion) > 0 {
+ if err := m.list.SetOSVersion(d, options.OSVersion); err != nil {
+ return err
+ }
+ }
+ if len(options.Features) > 0 {
+ if err := m.list.SetFeatures(d, options.Features); err != nil {
+ return err
+ }
+ }
+ if len(options.OSFeatures) > 0 {
+ if err := m.list.SetOSFeatures(d, options.OSFeatures); err != nil {
+ return err
+ }
+ }
+ if len(options.Architecture) > 0 {
+ if err := m.list.SetArchitecture(d, options.Architecture); err != nil {
+ return err
+ }
+ }
+ if len(options.Variant) > 0 {
+ if err := m.list.SetVariant(d, options.Variant); err != nil {
+ return err
+ }
+ }
+ if len(options.Annotations) > 0 {
+ if err := m.list.SetAnnotations(&d, options.Annotations); err != nil {
+ return err
+ }
+ }
+
+ // Write the changes to disk.
+ if err := m.saveAndReload(); err != nil {
+ return err
+ }
+ return nil
+}
+
+// RemoveInstance removes the instance specified by `d` from the manifest list
+// and writes the updated list back to the containers storage.
+func (m *ManifestList) RemoveInstance(d digest.Digest) error {
+ if err := m.list.Remove(d); err != nil {
+ return err
+ }
+
+ // Write the changes to disk.
+ if err := m.saveAndReload(); err != nil {
+ return err
+ }
+ return nil
+}
+
+// ManifestListPushOptions allow for customizing pushing a manifest list.
+type ManifestListPushOptions struct {
+ CopyOptions
+
+ // For tweaking the list selection.
+ ImageListSelection imageCopy.ImageListSelection
+ // Use when selecting only specific images.
+ Instances []digest.Digest
+}
+
+// Push pushes a manifest to the specified destination.
+func (m *ManifestList) Push(ctx context.Context, destination string, options *ManifestListPushOptions) (digest.Digest, error) {
+ if options == nil {
+ options = &ManifestListPushOptions{}
+ }
+
+ dest, err := alltransports.ParseImageName(destination)
+ if err != nil {
+ oldErr := err
+ dest, err = alltransports.ParseImageName("docker://" + destination)
+ if err != nil {
+ return "", oldErr
+ }
+ }
+
+ // NOTE: we're using the logic in copier to create a proper
+ // types.SystemContext. This prevents us from having an error prone
+ // code duplicate here.
+ copier, err := m.image.runtime.newCopier(&options.CopyOptions)
+ if err != nil {
+ return "", err
+ }
+ defer copier.close()
+
+ pushOptions := manifests.PushOptions{
+ Store: m.image.runtime.store,
+ SystemContext: copier.systemContext,
+ ImageListSelection: options.ImageListSelection,
+ Instances: options.Instances,
+ ReportWriter: options.Writer,
+ SignBy: options.SignBy,
+ RemoveSignatures: options.RemoveSignatures,
+ ManifestType: options.ManifestMIMEType,
+ }
+
+ _, d, err := m.list.Push(ctx, dest, pushOptions)
+ return d, err
+}
diff --git a/vendor/github.com/containers/buildah/manifests/copy.go b/vendor/github.com/containers/common/libimage/manifests/copy.go
index 7e651a46c..7e651a46c 100644
--- a/vendor/github.com/containers/buildah/manifests/copy.go
+++ b/vendor/github.com/containers/common/libimage/manifests/copy.go
diff --git a/vendor/github.com/containers/buildah/manifests/manifests.go b/vendor/github.com/containers/common/libimage/manifests/manifests.go
index 0fe7e477b..875c2948d 100644
--- a/vendor/github.com/containers/buildah/manifests/manifests.go
+++ b/vendor/github.com/containers/common/libimage/manifests/manifests.go
@@ -6,8 +6,8 @@ import (
stderrors "errors"
"io"
- "github.com/containers/buildah/pkg/manifests"
- "github.com/containers/buildah/pkg/supplemented"
+ "github.com/containers/common/pkg/manifests"
+ "github.com/containers/common/pkg/supplemented"
cp "github.com/containers/image/v5/copy"
"github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/image"
diff --git a/vendor/github.com/containers/common/libimage/normalize.go b/vendor/github.com/containers/common/libimage/normalize.go
new file mode 100644
index 000000000..03d2456de
--- /dev/null
+++ b/vendor/github.com/containers/common/libimage/normalize.go
@@ -0,0 +1,92 @@
+package libimage
+
+import (
+ "strings"
+
+ "github.com/containers/image/v5/docker/reference"
+ "github.com/pkg/errors"
+)
+
+// NormalizeName normalizes the provided name according to the conventions by
+// Podman and Buildah. If tag and digest are missing, the "latest" tag will be
+// used. If it's a short name, it will be prefixed with "localhost/".
+//
+// References to docker.io are normalized according to the Docker conventions.
+// For instance, "docker.io/foo" turns into "docker.io/library/foo".
+func NormalizeName(name string) (reference.Named, error) {
+ // NOTE: this code is in symmetry with containers/image/pkg/shortnames.
+ ref, err := reference.Parse(name)
+ if err != nil {
+ return nil, errors.Wrapf(err, "error normalizing name %q", name)
+ }
+
+ named, ok := ref.(reference.Named)
+ if !ok {
+ return nil, errors.Errorf("%q is not a named reference", name)
+ }
+
+ // Enforce "localhost" if needed.
+ registry := reference.Domain(named)
+ if !(strings.ContainsAny(registry, ".:") || registry == "localhost") {
+ name = toLocalImageName(ref.String())
+ }
+
+ // Another parse which also makes sure that docker.io references are
+ // correctly normalized (e.g., docker.io/alpine to
+ // docker.io/library/alpine).
+ named, err = reference.ParseNormalizedNamed(name)
+ if err != nil {
+ return nil, err
+ }
+
+ if _, hasTag := named.(reference.NamedTagged); hasTag {
+ return named, nil
+ }
+ if _, hasDigest := named.(reference.Digested); hasDigest {
+ return named, nil
+ }
+
+ // Make sure to tag "latest".
+ return reference.TagNameOnly(named), nil
+}
+
+// prefix the specified name with "localhost/".
+func toLocalImageName(name string) string {
+ return "localhost/" + strings.TrimLeft(name, "/")
+}
+
+// NameTagPair represents a RepoTag of an image.
+type NameTagPair struct {
+ // Name of the RepoTag. May be "<none>".
+ Name string
+ // Tag of the RepoTag. May be "<none>".
+ Tag string
+
+ // for internal use
+ named reference.Named
+}
+
+// ToNameTagsPairs splits repoTags into name&tag pairs.
+// Guaranteed to return at least one pair.
+func ToNameTagPairs(repoTags []reference.Named) ([]NameTagPair, error) {
+ none := "<none>"
+
+ var pairs []NameTagPair
+ for i, named := range repoTags {
+ pair := NameTagPair{
+ Name: named.Name(),
+ Tag: none,
+ named: repoTags[i],
+ }
+
+ if tagged, isTagged := named.(reference.NamedTagged); isTagged {
+ pair.Tag = tagged.Tag()
+ }
+ pairs = append(pairs, pair)
+ }
+
+ if len(pairs) == 0 {
+ pairs = append(pairs, NameTagPair{Name: none, Tag: none})
+ }
+ return pairs, nil
+}
diff --git a/vendor/github.com/containers/common/libimage/oci.go b/vendor/github.com/containers/common/libimage/oci.go
new file mode 100644
index 000000000..b88d6613d
--- /dev/null
+++ b/vendor/github.com/containers/common/libimage/oci.go
@@ -0,0 +1,97 @@
+package libimage
+
+import (
+ "context"
+
+ ociv1 "github.com/opencontainers/image-spec/specs-go/v1"
+)
+
+// toOCI returns the image as OCI v1 image.
+func (i *Image) toOCI(ctx context.Context) (*ociv1.Image, error) {
+ if i.cached.ociv1Image != nil {
+ return i.cached.ociv1Image, nil
+ }
+ ref, err := i.StorageReference()
+ if err != nil {
+ return nil, err
+ }
+
+ img, err := ref.NewImage(ctx, i.runtime.systemContextCopy())
+ if err != nil {
+ return nil, err
+ }
+ defer img.Close()
+
+ return img.OCIConfig(ctx)
+}
+
+// historiesMatch returns the number of entries in the histories which have the
+// same contents
+func historiesMatch(a, b []ociv1.History) int {
+ i := 0
+ for i < len(a) && i < len(b) {
+ if a[i].Created != nil && b[i].Created == nil {
+ return i
+ }
+ if a[i].Created == nil && b[i].Created != nil {
+ return i
+ }
+ if a[i].Created != nil && b[i].Created != nil {
+ if !a[i].Created.Equal(*(b[i].Created)) {
+ return i
+ }
+ }
+ if a[i].CreatedBy != b[i].CreatedBy {
+ return i
+ }
+ if a[i].Author != b[i].Author {
+ return i
+ }
+ if a[i].Comment != b[i].Comment {
+ return i
+ }
+ if a[i].EmptyLayer != b[i].EmptyLayer {
+ return i
+ }
+ i++
+ }
+ return i
+}
+
+// areParentAndChild checks diff ID and history in the two images and return
+// true if the second should be considered to be directly based on the first
+func areParentAndChild(parent, child *ociv1.Image) bool {
+ // the child and candidate parent should share all of the
+ // candidate parent's diff IDs, which together would have
+ // controlled which layers were used
+
+ // Both, child and parent, may be nil when the storage is left in an
+ // incoherent state. Issue #7444 describes such a case when a build
+ // has been killed.
+ if child == nil || parent == nil {
+ return false
+ }
+
+ if len(parent.RootFS.DiffIDs) > len(child.RootFS.DiffIDs) {
+ return false
+ }
+ childUsesCandidateDiffs := true
+ for i := range parent.RootFS.DiffIDs {
+ if child.RootFS.DiffIDs[i] != parent.RootFS.DiffIDs[i] {
+ childUsesCandidateDiffs = false
+ break
+ }
+ }
+ if !childUsesCandidateDiffs {
+ return false
+ }
+ // the child should have the same history as the parent, plus
+ // one more entry
+ if len(parent.History)+1 != len(child.History) {
+ return false
+ }
+ if historiesMatch(parent.History, child.History) != len(parent.History) {
+ return false
+ }
+ return true
+}
diff --git a/vendor/github.com/containers/common/libimage/pull.go b/vendor/github.com/containers/common/libimage/pull.go
new file mode 100644
index 000000000..b92a5e15e
--- /dev/null
+++ b/vendor/github.com/containers/common/libimage/pull.go
@@ -0,0 +1,458 @@
+package libimage
+
+import (
+ "context"
+ "fmt"
+ "io"
+ "strings"
+
+ "github.com/containers/common/pkg/config"
+ dirTransport "github.com/containers/image/v5/directory"
+ dockerTransport "github.com/containers/image/v5/docker"
+ dockerArchiveTransport "github.com/containers/image/v5/docker/archive"
+ "github.com/containers/image/v5/docker/reference"
+ ociArchiveTransport "github.com/containers/image/v5/oci/archive"
+ ociTransport "github.com/containers/image/v5/oci/layout"
+ "github.com/containers/image/v5/pkg/shortnames"
+ storageTransport "github.com/containers/image/v5/storage"
+ "github.com/containers/image/v5/transports/alltransports"
+ "github.com/containers/image/v5/types"
+ "github.com/containers/storage"
+ "github.com/pkg/errors"
+ "github.com/sirupsen/logrus"
+)
+
+// PullOptions allows for customizing image pulls.
+type PullOptions struct {
+ CopyOptions
+
+ // If true, all tags of the image will be pulled from the container
+ // registry. Only supported for the docker transport.
+ AllTags bool
+}
+
+// Pull pulls the specified name. Name may refer to any of the supported
+// transports from github.com/containers/image. If no transport is encoded,
+// name will be treated as a reference to a registry (i.e., docker transport).
+//
+// Note that pullPolicy is only used when pulling from a container registry but
+// it *must* be different than the default value `config.PullPolicyUnsupported`. This
+// way, callers are forced to decide on the pull behaviour. The reasoning
+// behind is that some (commands of some) tools have different default pull
+// policies (e.g., buildah-bud versus podman-build). Making the pull-policy
+// choice explicit is an attempt to prevent silent regressions.
+//
+// The error is storage.ErrImageUnknown iff the pull policy is set to "never"
+// and no local image has been found. This allows for an easier integration
+// into some users of this package (e.g., Buildah).
+func (r *Runtime) Pull(ctx context.Context, name string, pullPolicy config.PullPolicy, options *PullOptions) ([]*Image, error) {
+ logrus.Debugf("Pulling image %s (policy: %s)", name, pullPolicy)
+
+ if options == nil {
+ options = &PullOptions{}
+ }
+
+ ref, err := alltransports.ParseImageName(name)
+ if err != nil {
+ // If the image clearly refers to a local one, we can look it up directly.
+ // In fact, we need to since they are not parseable.
+ // NOTE: ContainsAny (not Contains) — a 64-char name is only an image
+ // ID if it contains none of the characters '/', '.', ':', '@'.
+ if strings.HasPrefix(name, "sha256:") || (len(name) == 64 && !strings.ContainsAny(name, "/.:@")) {
+ if pullPolicy == config.PullPolicyAlways {
+ return nil, errors.Errorf("pull policy is always but image has been referred to by ID (%s)", name)
+ }
+ local, _, err := r.LookupImage(name, nil)
+ if err != nil {
+ return nil, err
+ }
+ return []*Image{local}, nil
+ }
+
+ // If the input does not include a transport assume it refers
+ // to a registry.
+ dockerRef, dockerErr := alltransports.ParseImageName("docker://" + name)
+ if dockerErr != nil {
+ return nil, err
+ }
+ ref = dockerRef
+ }
+
+ if options.AllTags && ref.Transport().Name() != dockerTransport.Transport.Name() {
+ return nil, errors.Errorf("pulling all tags is not supported for %s transport", ref.Transport().Name())
+ }
+
+ var (
+ pulledImages []string
+ pullError error
+ )
+
+ // Dispatch the copy operation.
+ switch ref.Transport().Name() {
+
+ // DOCKER/REGISTRY
+ case dockerTransport.Transport.Name():
+ pulledImages, pullError = r.copyFromRegistry(ctx, ref, strings.TrimPrefix(name, "docker://"), pullPolicy, options)
+
+ // DOCKER ARCHIVE
+ case dockerArchiveTransport.Transport.Name():
+ pulledImages, pullError = r.copyFromDockerArchive(ctx, ref, &options.CopyOptions)
+
+ // OCI
+ case ociTransport.Transport.Name():
+ pulledImages, pullError = r.copyFromDefault(ctx, ref, &options.CopyOptions)
+
+ // OCI ARCHIVE
+ case ociArchiveTransport.Transport.Name():
+ pulledImages, pullError = r.copyFromDefault(ctx, ref, &options.CopyOptions)
+
+ // DIR
+ case dirTransport.Transport.Name():
+ pulledImages, pullError = r.copyFromDefault(ctx, ref, &options.CopyOptions)
+
+ // UNSUPPORTED
+ default:
+ return nil, errors.Errorf("unsupported transport %q for pulling", ref.Transport().Name())
+ }
+
+ if pullError != nil {
+ return nil, pullError
+ }
+
+ localImages := []*Image{}
+ lookupOptions := &LookupImageOptions{IgnorePlatform: true}
+ for _, name := range pulledImages {
+ local, _, err := r.LookupImage(name, lookupOptions)
+ if err != nil {
+ return nil, errors.Wrapf(err, "error locating pulled image %q name in containers storage", name)
+ }
+ localImages = append(localImages, local)
+ }
+
+ return localImages, nil
+}
+
+// copyFromDefault is the default copier for a number of transports. Other
+// transports require some specific dancing, sometimes Yoga.
+func (r *Runtime) copyFromDefault(ctx context.Context, ref types.ImageReference, options *CopyOptions) ([]string, error) {
+ c, err := r.newCopier(options)
+ if err != nil {
+ return nil, err
+ }
+ defer c.close()
+
+ // Figure out a name for the storage destination.
+ var storageName, imageName string
+ switch ref.Transport().Name() {
+
+ case ociTransport.Transport.Name():
+ split := strings.SplitN(ref.StringWithinTransport(), ":", 2)
+ storageName = toLocalImageName(split[0])
+ imageName = storageName
+
+ case ociArchiveTransport.Transport.Name():
+ manifest, err := ociArchiveTransport.LoadManifestDescriptor(ref)
+ if err != nil {
+ return nil, err
+ }
+ // if index.json has no reference name, compute the image digest instead
+ if manifest.Annotations == nil || manifest.Annotations["org.opencontainers.image.ref.name"] == "" {
+ storageName, err = getImageDigest(ctx, ref, nil)
+ if err != nil {
+ return nil, err
+ }
+ imageName = "sha256:" + storageName[1:]
+ } else {
+ storageName = manifest.Annotations["org.opencontainers.image.ref.name"]
+ imageName = storageName
+ }
+
+ default:
+ storageName = toLocalImageName(ref.StringWithinTransport())
+ imageName = storageName
+ }
+
+ // Create a storage reference.
+ destRef, err := storageTransport.Transport.ParseStoreReference(r.store, storageName)
+ if err != nil {
+ return nil, err
+ }
+
+ _, err = c.copy(ctx, ref, destRef)
+ return []string{imageName}, err
+}
+
+// storageReferencesReferencesFromArchiveReader returns a slice of image references inside the
+// archive reader. A docker archive may include more than one image and this
+// method allows for extracting them into containers storage references which
+// can later be used from copying.
+func (r *Runtime) storageReferencesReferencesFromArchiveReader(ctx context.Context, readerRef types.ImageReference, reader *dockerArchiveTransport.Reader) ([]types.ImageReference, []string, error) {
+ destNames, err := reader.ManifestTagsForReference(readerRef)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ var imageNames []string
+ if len(destNames) == 0 {
+ destName, err := getImageDigest(ctx, readerRef, &r.systemContext)
+ if err != nil {
+ return nil, nil, err
+ }
+ destNames = append(destNames, destName)
+ // Make sure the image can be loaded after the pull by
+ // replacing the @ with sha256:.
+ imageNames = append(imageNames, "sha256:"+destName[1:])
+ } else {
+ for i := range destNames {
+ ref, err := NormalizeName(destNames[i])
+ if err != nil {
+ return nil, nil, err
+ }
+ destNames[i] = ref.String()
+ }
+ imageNames = destNames
+ }
+
+ references := []types.ImageReference{}
+ for _, destName := range destNames {
+ destRef, err := storageTransport.Transport.ParseStoreReference(r.store, destName)
+ if err != nil {
+ return nil, nil, errors.Wrapf(err, "error parsing dest reference name %#v", destName)
+ }
+ references = append(references, destRef)
+ }
+
+ return references, imageNames, nil
+}
+
+// copyFromDockerArchive copies one image from the specified reference.
+func (r *Runtime) copyFromDockerArchive(ctx context.Context, ref types.ImageReference, options *CopyOptions) ([]string, error) {
+ // There may be more than one image inside the docker archive, so we
+ // need a quick glimpse inside.
+ reader, readerRef, err := dockerArchiveTransport.NewReaderForReference(&r.systemContext, ref)
+ if err != nil {
+ return nil, err
+ }
+
+ return r.copyFromDockerArchiveReaderReference(ctx, reader, readerRef, options)
+}
+
+// copyFromDockerArchiveReaderReference copies the specified readerRef from reader.
+func (r *Runtime) copyFromDockerArchiveReaderReference(ctx context.Context, reader *dockerArchiveTransport.Reader, readerRef types.ImageReference, options *CopyOptions) ([]string, error) {
+ c, err := r.newCopier(options)
+ if err != nil {
+ return nil, err
+ }
+ defer c.close()
+
+ // Get a slice of storage references we can copy.
+ references, destNames, err := r.storageReferencesReferencesFromArchiveReader(ctx, readerRef, reader)
+ if err != nil {
+ return nil, err
+ }
+
+ // Now copy all of the images. Use readerRef for performance.
+ for _, destRef := range references {
+ if _, err := c.copy(ctx, readerRef, destRef); err != nil {
+ return nil, err
+ }
+ }
+
+ return destNames, nil
+}
+
+// copyFromRegistry pulls the specified, possibly unqualified, name from a
+// registry. On successful pull it returns the used fully-qualified name that
+// can later be used to look up the image in the local containers storage.
+//
+// If options.All is set, all tags from the specified registry will be pulled.
+func (r *Runtime) copyFromRegistry(ctx context.Context, ref types.ImageReference, inputName string, pullPolicy config.PullPolicy, options *PullOptions) ([]string, error) {
+ // Sanity check.
+ if err := pullPolicy.Validate(); err != nil {
+ return nil, err
+ }
+
+ if !options.AllTags {
+ return r.copySingleImageFromRegistry(ctx, inputName, pullPolicy, options)
+ }
+
+ named := reference.TrimNamed(ref.DockerReference())
+ tags, err := dockerTransport.GetRepositoryTags(ctx, &r.systemContext, ref)
+ if err != nil {
+ return nil, err
+ }
+
+ pulledTags := []string{}
+ for _, tag := range tags {
+ select { // Let's be gentle with Podman remote.
+ case <-ctx.Done():
+ return nil, errors.Errorf("pulling cancelled")
+ default:
+ // We can continue.
+ }
+ tagged, err := reference.WithTag(named, tag)
+ if err != nil {
+ return nil, errors.Wrapf(err, "error creating tagged reference (name %s, tag %s)", named.String(), tag)
+ }
+ pulled, err := r.copySingleImageFromRegistry(ctx, tagged.String(), pullPolicy, options)
+ if err != nil {
+ return nil, err
+ }
+ pulledTags = append(pulledTags, pulled...)
+ }
+
+ return pulledTags, nil
+}
+
+// copySingleImageFromRegistry pulls the specified, possibly unqualified, name
+// from a registry. On successful pull it returns the used fully-qualified
+// name that can later be used to look up the image in the local containers
+// storage.
+func (r *Runtime) copySingleImageFromRegistry(ctx context.Context, imageName string, pullPolicy config.PullPolicy, options *PullOptions) ([]string, error) {
+ // Sanity check.
+ if err := pullPolicy.Validate(); err != nil {
+ return nil, err
+ }
+
+ var (
+ localImage *Image
+ resolvedImageName string
+ err error
+ )
+
+ // Always check if there's a local image. If one is present, we should
+ // use its resolved name for pulling. Assume we're doing a `pull foo`.
+ // If there's already a local image "localhost/foo", then we should
+ // attempt pulling that instead of doing the full short-name dance.
+ localImage, resolvedImageName, err = r.LookupImage(imageName, nil)
+ if err != nil && errors.Cause(err) != storage.ErrImageUnknown {
+ return nil, errors.Wrap(err, "error looking up local image")
+ }
+
+ if pullPolicy == config.PullPolicyNever {
+ if localImage != nil {
+ logrus.Debugf("Pull policy %q and %s resolved to local image %s", pullPolicy, imageName, resolvedImageName)
+ return []string{resolvedImageName}, nil
+ }
+ logrus.Debugf("Pull policy %q but no local image has been found for %s", pullPolicy, imageName)
+ return nil, errors.Wrap(storage.ErrImageUnknown, imageName)
+ }
+
+ if pullPolicy == config.PullPolicyMissing && localImage != nil {
+ return []string{resolvedImageName}, nil
+ }
+
+ // If we looked up the image by ID, we cannot really pull from anywhere.
+ if localImage != nil && strings.HasPrefix(localImage.ID(), imageName) {
+ switch pullPolicy {
+ case config.PullPolicyAlways:
+ return nil, errors.Errorf("pull policy is always but image has been referred to by ID (%s)", imageName)
+ default:
+ return []string{resolvedImageName}, nil
+ }
+ }
+
+ // If we found a local image, we should use its locally resolved name
+ // (see containers/buildah #2904).
+ if localImage != nil {
+ if imageName != resolvedImageName {
+ logrus.Debugf("Image %s resolved to local image %s which will be used for pulling", imageName, resolvedImageName)
+ }
+ imageName = resolvedImageName
+ }
+
+ sys := r.systemContextCopy()
+ resolved, err := shortnames.Resolve(sys, imageName)
+ if err != nil {
+ return nil, err
+ }
+
+ // NOTE: Below we print the description from the short-name resolution.
+ // In theory we could print it here. In practice, however, this is
+ // causing a hard time for Buildah users who are doing a `buildah from
+ // image` and expect just the container name to be printed if the image
+ // is present locally.
+ // The pragmatic solution is to only print the description when we found
+ // a _newer_ image that we're about to pull.
+ wroteDesc := false
+ writeDesc := func() error {
+ if wroteDesc {
+ return nil
+ }
+ wroteDesc = true
+ if desc := resolved.Description(); len(desc) > 0 {
+ logrus.Debug(desc)
+ if options.Writer != nil {
+ if _, err := options.Writer.Write([]byte(desc + "\n")); err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+ }
+
+ c, err := r.newCopier(&options.CopyOptions)
+ if err != nil {
+ return nil, err
+ }
+ defer c.close()
+
+ var pullErrors []error
+ for _, candidate := range resolved.PullCandidates {
+ candidateString := candidate.Value.String()
+ logrus.Debugf("Attempting to pull candidate %s for %s", candidateString, imageName)
+ srcRef, err := dockerTransport.NewReference(candidate.Value)
+ if err != nil {
+ return nil, err
+ }
+
+ if pullPolicy == config.PullPolicyNewer && localImage != nil {
+ isNewer, err := localImage.HasDifferentDigest(ctx, srcRef)
+ if err != nil {
+ pullErrors = append(pullErrors, err)
+ continue
+ }
+
+ if !isNewer {
+ logrus.Debugf("Skipping pull candidate %s as the image is not newer (pull policy %s)", candidateString, pullPolicy)
+ continue
+ }
+ }
+
+ destRef, err := storageTransport.Transport.ParseStoreReference(r.store, candidate.Value.String())
+ if err != nil {
+ return nil, err
+ }
+
+ if err := writeDesc(); err != nil {
+ return nil, err
+ }
+ if options.Writer != nil {
+ if _, err := io.WriteString(options.Writer, fmt.Sprintf("Trying to pull %s...\n", candidateString)); err != nil {
+ return nil, err
+ }
+ }
+ if _, err := c.copy(ctx, srcRef, destRef); err != nil {
+ logrus.Debugf("Error pulling candidate %s: %v", candidateString, err)
+ pullErrors = append(pullErrors, err)
+ continue
+ }
+ if err := candidate.Record(); err != nil {
+ // Only log the recording errors. Podman has seen
+ // reports where users set most of the system to
+ // read-only which can cause issues.
+ logrus.Errorf("Error recording short-name alias %q: %v", candidateString, err)
+ }
+
+ logrus.Debugf("Pulled candidate %s successfully", candidateString)
+ return []string{candidate.Value.String()}, nil
+ }
+
+ if localImage != nil && pullPolicy == config.PullPolicyNewer {
+ return []string{resolvedImageName}, nil
+ }
+
+ if len(pullErrors) == 0 {
+ return nil, errors.Errorf("internal error: no image pulled (pull policy %s)", pullPolicy)
+ }
+
+ return nil, resolved.FormatPullErrors(pullErrors)
+}
diff --git a/vendor/github.com/containers/common/libimage/push.go b/vendor/github.com/containers/common/libimage/push.go
new file mode 100644
index 000000000..8ff5d5ffd
--- /dev/null
+++ b/vendor/github.com/containers/common/libimage/push.go
@@ -0,0 +1,83 @@
+package libimage
+
+import (
+ "context"
+
+ dockerArchiveTransport "github.com/containers/image/v5/docker/archive"
+ "github.com/containers/image/v5/docker/reference"
+ "github.com/containers/image/v5/transports/alltransports"
+ "github.com/sirupsen/logrus"
+)
+
+// PushOptions allows for customizing image pushes.
+type PushOptions struct {
+ CopyOptions
+}
+
+// Push pushes the specified source which must refer to an image in the local
+// containers storage. It may or may not have the `containers-storage:`
+// prefix. Use destination to push to a custom destination. The destination
+// can refer to any supported transport. If no transport is specified, the
+// docker transport (i.e., a registry) is implied. If destination is left
+// empty, the docker destination will be extrapolated from the source.
+//
+// Return storage.ErrImageUnknown if source could not be found in the local
+// containers storage.
+func (r *Runtime) Push(ctx context.Context, source, destination string, options *PushOptions) ([]byte, error) {
+ if options == nil {
+ options = &PushOptions{}
+ }
+
+ // Look up the local image.
+ image, resolvedSource, err := r.LookupImage(source, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ srcRef, err := image.StorageReference()
+ if err != nil {
+ return nil, err
+ }
+
+ // Make sure we have a proper destination, and parse it into an image
+ // reference for copying.
+ if destination == "" {
+ // Doing an ID check here is tempting but false positives (due
+ // to short partial IDs) are more painful than false
+ // negatives.
+ destination = resolvedSource
+ }
+
+ logrus.Debugf("Pushing image %s to %s", source, destination)
+
+ destRef, err := alltransports.ParseImageName(destination)
+ if err != nil {
+ // If the input does not include a transport assume it refers
+ // to a registry.
+ dockerRef, dockerErr := alltransports.ParseImageName("docker://" + destination)
+ if dockerErr != nil {
+ return nil, err
+ }
+ destRef = dockerRef
+ }
+
+ // Buildah compat: Make sure to tag the destination image if it's a
+ // Docker archive. This way, we preserve the image name.
+ if destRef.Transport().Name() == dockerArchiveTransport.Transport.Name() {
+ if named, err := reference.ParseNamed(resolvedSource); err == nil {
+ tagged, isTagged := named.(reference.NamedTagged)
+ if isTagged {
+ options.dockerArchiveAdditionalTags = []reference.NamedTagged{tagged}
+ }
+ }
+ }
+
+ c, err := r.newCopier(&options.CopyOptions)
+ if err != nil {
+ return nil, err
+ }
+
+ defer c.close()
+
+ return c.copy(ctx, srcRef, destRef)
+}
diff --git a/vendor/github.com/containers/common/libimage/runtime.go b/vendor/github.com/containers/common/libimage/runtime.go
new file mode 100644
index 000000000..4e6bd2cf2
--- /dev/null
+++ b/vendor/github.com/containers/common/libimage/runtime.go
@@ -0,0 +1,573 @@
+package libimage
+
+import (
+ "context"
+ "os"
+ "path/filepath"
+ "runtime"
+ "strings"
+
+ "github.com/containers/image/v5/docker/reference"
+ "github.com/containers/image/v5/pkg/shortnames"
+ storageTransport "github.com/containers/image/v5/storage"
+ "github.com/containers/image/v5/transports/alltransports"
+ "github.com/containers/image/v5/types"
+ "github.com/containers/storage"
+ deepcopy "github.com/jinzhu/copier"
+ "github.com/pkg/errors"
+ "github.com/sirupsen/logrus"
+)
+
+// RuntimeOptions allow for creating a customized Runtime.
+type RuntimeOptions struct {
+ // SystemContext is copied and used as the runtime's base system
+ // context; if nil, an empty context is used.
+ SystemContext *types.SystemContext
+}
+
+// setRegistriesConfPath sets the registries.conf path for the specified context.
+// An already-set path wins; otherwise CONTAINERS_REGISTRIES_CONF takes
+// precedence over the legacy REGISTRIES_CONFIG_PATH environment variable.
+func setRegistriesConfPath(systemContext *types.SystemContext) {
+ if systemContext.SystemRegistriesConfPath != "" {
+ return
+ }
+ if envOverride, ok := os.LookupEnv("CONTAINERS_REGISTRIES_CONF"); ok {
+ systemContext.SystemRegistriesConfPath = envOverride
+ return
+ }
+ if envOverride, ok := os.LookupEnv("REGISTRIES_CONFIG_PATH"); ok {
+ systemContext.SystemRegistriesConfPath = envOverride
+ return
+ }
+}
+
+// Runtime is responsible for image management and storing them in a containers
+// storage.
+type Runtime struct {
+ // Underlying storage store.
+ store storage.Store
+ // Global system context. Stored by value (no pointer) to simplify
+ // copying and modifying it.
+ systemContext types.SystemContext
+}
+
+// systemContextCopy returns a deep copy (via the jinzhu/copier package) of
+// the runtime's system context, so callers may mutate it freely.
+func (r *Runtime) systemContextCopy() *types.SystemContext {
+ var sys types.SystemContext
+ deepcopy.Copy(&sys, &r.systemContext)
+ return &sys
+}
+
+// RuntimeFromStore returns a Runtime for the specified store.
+func RuntimeFromStore(store storage.Store, options *RuntimeOptions) (*Runtime, error) {
+ if options == nil {
+ options = &RuntimeOptions{}
+ }
+
+ var systemContext types.SystemContext
+ if options.SystemContext != nil {
+ systemContext = *options.SystemContext
+ } else {
+ systemContext = types.SystemContext{}
+ }
+
+ setRegistriesConfPath(&systemContext)
+
+ // Default the blob-info cache to a directory inside the graph root.
+ if systemContext.BlobInfoCacheDir == "" {
+ systemContext.BlobInfoCacheDir = filepath.Join(store.GraphRoot(), "cache")
+ }
+
+ return &Runtime{
+ store: store,
+ systemContext: systemContext,
+ }, nil
+}
+
+// RuntimeFromStoreOptions returns a Runtime for the specified store options.
+// Note that it also registers the created store with the storage transport.
+func RuntimeFromStoreOptions(runtimeOptions *RuntimeOptions, storeOptions *storage.StoreOptions) (*Runtime, error) {
+ if storeOptions == nil {
+ storeOptions = &storage.StoreOptions{}
+ }
+ store, err := storage.GetStore(*storeOptions)
+ if err != nil {
+ return nil, err
+ }
+ storageTransport.Transport.SetStore(store)
+ return RuntimeFromStore(store, runtimeOptions)
+}
+
+// Shutdown attempts to free any kernel resources which are being used by the
+// underlying driver. If "force" is true, any mounted (i.e., in use) layers
+// are unmounted beforehand. If "force" is not true, then layers being in use
+// is considered to be an error condition.
+func (r *Runtime) Shutdown(force bool) error {
+ // The store's first return value is intentionally discarded.
+ _, err := r.store.Shutdown(force)
+ return err
+}
+
+// storageToImage transforms a storage.Image to an Image. ref may be nil
+// (e.g., ListImages passes nil when enumerating the whole store).
+func (r *Runtime) storageToImage(storageImage *storage.Image, ref types.ImageReference) *Image {
+ return &Image{
+ runtime: r,
+ storageImage: storageImage,
+ storageReference: ref,
+ }
+}
+
+// Exists returns true if the specified image exists in the local containers
+// storage.
+func (r *Runtime) Exists(name string) (bool, error) {
+ image, _, err := r.LookupImage(name, &LookupImageOptions{IgnorePlatform: true})
+ if err != nil && errors.Cause(err) != storage.ErrImageUnknown {
+ return false, err
+ }
+ return image != nil, nil
+}
+
+// LookupImageOptions allow for customizing local image lookups.
+type LookupImageOptions struct {
+ // If set, the image will be purely looked up by name. No matching to
+ // the current platform (os, arch, variant) will be performed. This
+ // can be helpful when the platform does not matter, for instance, for
+ // image removal.
+ IgnorePlatform bool
+}
+
+// LookupImage looks up `name` in the local container storage matching the
+// specified SystemContext. Returns the image and the name it has been found
+// with. Note that name may also use the `containers-storage:` prefix used to
+// refer to the containers-storage transport. Returns storage.ErrImageUnknown
+// if the image could not be found.
+//
+// If the specified name uses the `containers-storage` transport, the resolved
+// name is empty.
+func (r *Runtime) LookupImage(name string, options *LookupImageOptions) (*Image, string, error) {
+ logrus.Debugf("Looking up image %q in local containers storage", name)
+
+ if options == nil {
+ options = &LookupImageOptions{}
+ }
+
+ // If needed extract the name sans transport.
+ storageRef, err := alltransports.ParseImageName(name)
+ if err == nil {
+ if storageRef.Transport().Name() != storageTransport.Transport.Name() {
+ return nil, "", errors.Errorf("unsupported transport %q for looking up local images", storageRef.Transport().Name())
+ }
+ img, err := storageTransport.Transport.GetStoreImage(r.store, storageRef)
+ if err != nil {
+ return nil, "", err
+ }
+ logrus.Debugf("Found image %q in local containers storage (%s)", name, storageRef.StringWithinTransport())
+ return r.storageToImage(img, storageRef), "", nil
+ }
+
+ originalName := name
+ idByDigest := false
+ if strings.HasPrefix(name, "sha256:") {
+ // Strip off the sha256 prefix so it can be parsed later on.
+ idByDigest = true
+ name = strings.TrimPrefix(name, "sha256:")
+ }
+
+ // First, check if we have an exact match in the storage. Maybe an ID
+ // or a fully-qualified image name.
+ img, err := r.lookupImageInLocalStorage(name, name, options)
+ if err != nil {
+ return nil, "", err
+ }
+ if img != nil {
+ return img, originalName, nil
+ }
+
+ // If the name clearly referred to a local image, there's nothing we can
+ // do anymore.
+ // NOTE(review): storageRef is always nil at this point (the err == nil
+ // branch above returns early), so this condition reduces to idByDigest.
+ if storageRef != nil || idByDigest {
+ return nil, "", errors.Wrap(storage.ErrImageUnknown, originalName)
+ }
+
+ // Second, try out the candidates as resolved by shortnames. This takes
+ // "localhost/" prefixed images into account as well.
+ candidates, err := shortnames.ResolveLocally(&r.systemContext, name)
+ if err != nil {
+ return nil, "", errors.Wrap(storage.ErrImageUnknown, originalName)
+ }
+ // Backwards compat: normalize to docker.io as some users may very well
+ // rely on that.
+ if dockerNamed, err := reference.ParseDockerRef(name); err == nil {
+ candidates = append(candidates, dockerNamed)
+ }
+
+ for _, candidate := range candidates {
+ img, err := r.lookupImageInLocalStorage(name, candidate.String(), options)
+ if err != nil {
+ return nil, "", err
+ }
+ if img != nil {
+ return img, candidate.String(), err
+ }
+ }
+
+ return r.lookupImageInDigestsAndRepoTags(originalName, options)
+}
+
+// lookupImageInLocalStorage looks up the specified candidate for name in the
+// storage and checks whether it's matching the system context. Returns
+// (nil, nil) if the candidate does not exist or does not match.
+func (r *Runtime) lookupImageInLocalStorage(name, candidate string, options *LookupImageOptions) (*Image, error) {
+ logrus.Debugf("Trying %q ...", candidate)
+ img, err := r.store.Image(candidate)
+ if err != nil && errors.Cause(err) != storage.ErrImageUnknown {
+ return nil, err
+ }
+ if img == nil {
+ return nil, nil
+ }
+ ref, err := storageTransport.Transport.ParseStoreReference(r.store, img.ID)
+ if err != nil {
+ return nil, err
+ }
+
+ image := r.storageToImage(img, ref)
+ if options.IgnorePlatform {
+ logrus.Debugf("Found image %q as %q in local containers storage", name, candidate)
+ return image, nil
+ }
+
+ // If we referenced a manifest list, we need to check whether we can
+ // find a matching instance in the local containers storage.
+ isManifestList, err := image.IsManifestList(context.Background())
+ if err != nil {
+ return nil, err
+ }
+ if isManifestList {
+ manifestList, err := image.ToManifestList()
+ if err != nil {
+ return nil, err
+ }
+ image, err = manifestList.LookupInstance(context.Background(), "", "", "")
+ if err != nil {
+ return nil, err
+ }
+ ref, err = storageTransport.Transport.ParseStoreReference(r.store, "@"+image.ID())
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ matches, err := imageReferenceMatchesContext(context.Background(), ref, &r.systemContext)
+ if err != nil {
+ return nil, err
+ }
+
+ // NOTE: if the user referenced by ID we must optimistically assume
+ // that they know what they're doing. Given, we already did the
+ // manifest limbo above, we may already have resolved it.
+ if !matches && !strings.HasPrefix(image.ID(), candidate) {
+ return nil, nil
+ }
+ // Also print the string within the storage transport. That may aid in
+ // debugging when using additional stores since we see explicitly where
+ // the store is and which driver (options) are used.
+ logrus.Debugf("Found image %q as %q in local containers storage (%s)", name, candidate, ref.StringWithinTransport())
+ return image, nil
+}
+
+// lookupImageInDigestsAndRepoTags attempts to match name against any image in
+// the local containers storage. If name is digested, it will be compared
+// against image digests. Otherwise, it will be looked up in the repo tags.
+func (r *Runtime) lookupImageInDigestsAndRepoTags(name string, options *LookupImageOptions) (*Image, string, error) {
+ // Until now, we've tried very hard to find an image but now it is time
+ // for limbo. If the image includes a digest that we couldn't detect
+ // verbatim in the storage, we must have a look at all digests of all
+ // images. Those may change over time (e.g., via manifest lists).
+ // Both Podman and Buildah want us to do that dance.
+ allImages, err := r.ListImages(context.Background(), nil, nil)
+ if err != nil {
+ return nil, "", err
+ }
+
+ if !shortnames.IsShortName(name) {
+ named, err := reference.ParseNormalizedNamed(name)
+ if err != nil {
+ return nil, "", err
+ }
+ digested, hasDigest := named.(reference.Digested)
+ if !hasDigest {
+ return nil, "", errors.Wrap(storage.ErrImageUnknown, name)
+ }
+
+ logrus.Debug("Looking for image with matching recorded digests")
+ digest := digested.Digest()
+ for _, image := range allImages {
+ for _, d := range image.Digests() {
+ if d == digest {
+ return image, name, nil
+ }
+ }
+ }
+
+ return nil, "", errors.Wrap(storage.ErrImageUnknown, name)
+ }
+
+ // Podman compat: if we're looking for a short name but couldn't
+ // resolve it via the registries.conf dance, we need to look at *all*
+ // images and check if the name we're looking for matches a repo tag.
+ // Split the name into a repo/tag pair (the tag may be empty).
+ split := strings.SplitN(name, ":", 2)
+ repo := split[0]
+ tag := ""
+ if len(split) == 2 {
+ tag = split[1]
+ }
+ for _, image := range allImages {
+ named, err := image.inRepoTags(repo, tag)
+ if err != nil {
+ return nil, "", err
+ }
+ if named == nil {
+ continue
+ }
+ img, err := r.lookupImageInLocalStorage(name, named.String(), options)
+ if err != nil {
+ return nil, "", err
+ }
+ if img != nil {
+ return img, named.String(), err
+ }
+ }
+
+ return nil, "", errors.Wrap(storage.ErrImageUnknown, name)
+}
+
+// ResolveName resolves the specified name. If the name resolves to a local
+// image, the fully resolved name will be returned. Otherwise, the name will
+// be properly normalized.
+//
+// Note that an empty string is returned as is.
+func (r *Runtime) ResolveName(name string) (string, error) {
+ if name == "" {
+ return "", nil
+ }
+ image, resolvedName, err := r.LookupImage(name, &LookupImageOptions{IgnorePlatform: true})
+ if err != nil && errors.Cause(err) != storage.ErrImageUnknown {
+ return "", err
+ }
+
+ // NOTE(review): a non-nil image implies the lookup succeeded, so err
+ // is nil here and this effectively returns (resolvedName, nil).
+ if image != nil && !strings.HasPrefix(image.ID(), resolvedName) {
+ return resolvedName, err
+ }
+
+ normalized, err := NormalizeName(name)
+ if err != nil {
+ return "", err
+ }
+
+ return normalized.String(), nil
+}
+
+// imageReferenceMatchesContext returns true if the specified reference matches
+// the platform (os, arch, variant) as specified by the system context.
+// Empty OSChoice/ArchitectureChoice values default to runtime.GOOS/GOARCH;
+// an empty VariantChoice matches any variant.
+func imageReferenceMatchesContext(ctx context.Context, ref types.ImageReference, sys *types.SystemContext) (bool, error) {
+ if sys == nil {
+ return true, nil
+ }
+ img, err := ref.NewImage(ctx, sys)
+ if err != nil {
+ return false, err
+ }
+ defer img.Close()
+ data, err := img.Inspect(ctx)
+ if err != nil {
+ return false, err
+ }
+ osChoice := sys.OSChoice
+ if osChoice == "" {
+ osChoice = runtime.GOOS
+ }
+ arch := sys.ArchitectureChoice
+ if arch == "" {
+ arch = runtime.GOARCH
+ }
+ if osChoice == data.Os && arch == data.Architecture {
+ if sys.VariantChoice == "" || sys.VariantChoice == data.Variant {
+ return true, nil
+ }
+ }
+ return false, nil
+}
+
+// ListImagesOptions allow for customizing listing images.
+type ListImagesOptions struct {
+ // Filters to filter the listed images. They are also applied when
+ // explicit names are passed to ListImages. Supported filters are
+ // * after,before,since=image
+ // * dangling=true,false
+ // * intermediate=true,false (useful for pruning images)
+ // * id=id
+ // * label=key[=value]
+ // * readonly=true,false
+ // * reference=name[:tag] (wildcards allowed)
+ Filters []string
+}
+
+// ListImages lists images in the local container storage. If names are
+// specified, only images with the specified names are looked up and filtered.
+func (r *Runtime) ListImages(ctx context.Context, names []string, options *ListImagesOptions) ([]*Image, error) {
+ if options == nil {
+ options = &ListImagesOptions{}
+ }
+
+ var images []*Image
+ if len(names) > 0 {
+ lookupOpts := LookupImageOptions{IgnorePlatform: true}
+ for _, name := range names {
+ image, _, err := r.LookupImage(name, &lookupOpts)
+ if err != nil {
+ return nil, err
+ }
+ images = append(images, image)
+ }
+ } else {
+ storageImages, err := r.store.Images()
+ if err != nil {
+ return nil, err
+ }
+ for i := range storageImages {
+ images = append(images, r.storageToImage(&storageImages[i], nil))
+ }
+ }
+
+ // Compile and apply the requested filters, if any.
+ var filters []filterFunc
+ if len(options.Filters) > 0 {
+ compiledFilters, err := r.compileImageFilters(ctx, options.Filters)
+ if err != nil {
+ return nil, err
+ }
+ filters = append(filters, compiledFilters...)
+ }
+
+ return filterImages(images, filters)
+}
+
+// RemoveImagesOptions allow for customizing image removal.
+type RemoveImagesOptions struct {
+ // Force will remove all containers from the local storage that are
+ // using a removed image. Use RemoveContainerFunc for a custom logic.
+ // If set, all child images will be removed as well.
+ Force bool
+ // RemoveContainerFunc allows for a custom logic for removing
+ // containers using a specific image. By default, all containers in
+ // the local containers storage will be removed (if Force is set).
+ RemoveContainerFunc RemoveContainerFunc
+ // Filters to filter the removed images. They are only applied when no
+ // explicit names are passed to RemoveImages. Supported filters are
+ // * after,before,since=image
+ // * dangling=true,false
+ // * intermediate=true,false (useful for pruning images)
+ // * id=id
+ // * label=key[=value]
+ // * readonly=true,false
+ // * reference=name[:tag] (wildcards allowed)
+ Filters []string
+ // The RemoveImagesReport will include the size of the removed image.
+ // This information may be useful when pruning images to figure out how
+ // much space was freed. However, computing the size of an image is
+ // comparatively expensive, so it is made optional.
+ WithSize bool
+}
+
+// RemoveImages removes images specified by names. All images are expected to
+// exist in the local containers storage.
+//
+// If an image has more than one name, the image will be untagged with
+// the specified name. RemoveImages returns a slice of untagged and removed
+// images.
+//
+// Note that most errors are non-fatal and collected into `rmErrors` return
+// value.
+func (r *Runtime) RemoveImages(ctx context.Context, names []string, options *RemoveImagesOptions) (reports []*RemoveImageReport, rmErrors []error) {
+ if options == nil {
+ options = &RemoveImagesOptions{}
+ }
+
+ // The logic here may require some explanation. Image removal is
+ // surprisingly complex since it is recursive (intermediate parents are
+ // removed) and since multiple items in `names` may resolve to the
+ // *same* image. On top, the data in the containers storage is shared,
+ // so we need to be careful and the code must be robust. That is why
+ // users can only remove images via this function; the logic may be
+ // complex but the execution path is clear.
+
+ // Bundle an image with a possible empty slice of names to untag. That
+ // allows for a decent untagging logic and to bundle multiple
+ // references to the same *Image (and circumvent consistency issues).
+ type deleteMe struct {
+ image *Image
+ referencedBy []string
+ }
+
+ appendError := func(err error) {
+ rmErrors = append(rmErrors, err)
+ }
+
+ orderedIDs := []string{} // determinism and relative order
+ deleteMap := make(map[string]*deleteMe) // ID -> deleteMe
+
+ // Look up images in the local containers storage and fill out
+ // orderedIDs and the deleteMap.
+ switch {
+ case len(names) > 0:
+ lookupOptions := LookupImageOptions{IgnorePlatform: true}
+ for _, name := range names {
+ img, resolvedName, err := r.LookupImage(name, &lookupOptions)
+ if err != nil {
+ appendError(err)
+ continue
+ }
+ dm, exists := deleteMap[img.ID()]
+ if !exists {
+ orderedIDs = append(orderedIDs, img.ID())
+ dm = &deleteMe{image: img}
+ deleteMap[img.ID()] = dm
+ }
+ dm.referencedBy = append(dm.referencedBy, resolvedName)
+ }
+ if len(orderedIDs) == 0 {
+ return nil, rmErrors
+ }
+
+ case len(options.Filters) > 0:
+ filteredImages, err := r.ListImages(ctx, nil, &ListImagesOptions{Filters: options.Filters})
+ if err != nil {
+ appendError(err)
+ return nil, rmErrors
+ }
+ for _, img := range filteredImages {
+ orderedIDs = append(orderedIDs, img.ID())
+ deleteMap[img.ID()] = &deleteMe{image: img}
+ }
+ }
+
+ // Now remove the images in the given order.
+ rmMap := make(map[string]*RemoveImageReport)
+ for _, id := range orderedIDs {
+ del, exists := deleteMap[id]
+ if !exists {
+ // NOTE(review): the message below has a typo ("not in found
+ // in"); it is runtime-visible text and left unchanged here.
+ appendError(errors.Errorf("internal error: ID %s not in found in image-deletion map", id))
+ continue
+ }
+ if len(del.referencedBy) == 0 {
+ del.referencedBy = []string{""}
+ }
+ for _, ref := range del.referencedBy {
+ if err := del.image.remove(ctx, rmMap, ref, options); err != nil {
+ appendError(err)
+ continue
+ }
+ }
+ }
+
+ // Finally, we can assemble the reports slice.
+ for _, id := range orderedIDs {
+ report, exists := rmMap[id]
+ if exists {
+ reports = append(reports, report)
+ }
+ }
+
+ return reports, rmErrors
+}
diff --git a/vendor/github.com/containers/common/libimage/save.go b/vendor/github.com/containers/common/libimage/save.go
new file mode 100644
index 000000000..c03437682
--- /dev/null
+++ b/vendor/github.com/containers/common/libimage/save.go
@@ -0,0 +1,202 @@
+package libimage
+
+import (
+ "context"
+ "strings"
+
+ dirTransport "github.com/containers/image/v5/directory"
+ dockerArchiveTransport "github.com/containers/image/v5/docker/archive"
+ "github.com/containers/image/v5/docker/reference"
+ "github.com/containers/image/v5/manifest"
+ ociArchiveTransport "github.com/containers/image/v5/oci/archive"
+ ociTransport "github.com/containers/image/v5/oci/layout"
+ "github.com/containers/image/v5/types"
+ ociv1 "github.com/opencontainers/image-spec/specs-go/v1"
+ "github.com/pkg/errors"
+ "github.com/sirupsen/logrus"
+)
+
+// SaveOptions allow for customizing saving images.
+type SaveOptions struct {
+ CopyOptions
+
+ // AdditionalTags for the saved image. Incompatible with saving
+ // multiple images.
+ AdditionalTags []string
+}
+
+// Save saves one or more images indicated by `names` in the specified `format`
+// to `path`. Supported formats are oci-archive, docker-archive, oci-dir and
+// docker-dir. The latter two adhere to the dir transport in the corresponding
+// oci or docker v2s2 format. Please note that only docker-archive supports
+// saving more than one image. Other formats will yield an error attempting
+// to save more than one.
+func (r *Runtime) Save(ctx context.Context, names []string, format, path string, options *SaveOptions) error {
+ logrus.Debugf("Saving one more images (%s) to %q", names, path)
+
+ if options == nil {
+ options = &SaveOptions{}
+ }
+
+ // First some sanity checks to simplify subsequent code.
+ switch len(names) {
+ case 0:
+ return errors.New("no image specified for saving images")
+ case 1:
+ // All formats support saving 1.
+ default:
+ // NOTE(review): "unspported" below is a typo in runtime-visible
+ // text and is left unchanged here.
+ if format != "docker-archive" {
+ return errors.Errorf("unspported format %q for saving multiple images (only docker-archive)", format)
+ }
+ if len(options.AdditionalTags) > 0 {
+ return errors.Errorf("cannot save multiple images with multiple tags")
+ }
+ }
+
+ // Dispatch the save operations.
+ switch format {
+ case "oci-archive", "oci-dir", "docker-dir":
+ return r.saveSingleImage(ctx, names[0], format, path, options)
+
+ case "docker-archive":
+ return r.saveDockerArchive(ctx, names, path, options)
+ }
+
+ return errors.Errorf("unspported format %q for saving images", format)
+
+}
+
+// saveSingleImage saves the specified image name to the specified path.
+// Supported formats are "oci-archive", "oci-dir" and "docker-dir".
+func (r *Runtime) saveSingleImage(ctx context.Context, name, format, path string, options *SaveOptions) error {
+ image, imageName, err := r.LookupImage(name, nil)
+ if err != nil {
+ return err
+ }
+
+ // Unless the image was referenced by ID, use the resolved name as a
+ // tag.
+ var tag string
+ if !strings.HasPrefix(image.ID(), imageName) {
+ tag = imageName
+ }
+
+ srcRef, err := image.StorageReference()
+ if err != nil {
+ return err
+ }
+
+ // Prepare the destination reference.
+ var destRef types.ImageReference
+ switch format {
+ case "oci-archive":
+ destRef, err = ociArchiveTransport.NewReference(path, tag)
+
+ case "oci-dir":
+ destRef, err = ociTransport.NewReference(path, tag)
+ options.ManifestMIMEType = ociv1.MediaTypeImageManifest
+
+ case "docker-dir":
+ destRef, err = dirTransport.NewReference(path)
+ options.ManifestMIMEType = manifest.DockerV2Schema2MediaType
+
+ default:
+ return errors.Errorf("unspported format %q for saving images", format)
+ }
+
+ // Check the error from whichever NewReference call ran in the switch.
+ if err != nil {
+ return err
+ }
+
+ c, err := r.newCopier(&options.CopyOptions)
+ if err != nil {
+ return err
+ }
+ defer c.close()
+
+ _, err = c.copy(ctx, srcRef, destRef)
+ return err
+}
+
+// saveDockerArchive saves the specified images indicated by names to the path.
+// It loads all images from the local containers storage and assembles the meta
+// data needed to properly save images. Since multiple names could refer to
+// the *same* image, we need to dance a bit and store additional "names".
+// Those can then be used as additional tags when copying.
+func (r *Runtime) saveDockerArchive(ctx context.Context, names []string, path string, options *SaveOptions) error {
+ type localImage struct {
+ image *Image
+ tags []reference.NamedTagged
+ }
+
+ orderedIDs := []string{} // to preserve the relative order
+ localImages := make(map[string]*localImage) // to assemble tags
+ visitedNames := make(map[string]bool) // filters duplicate names
+ for _, name := range names {
+ // Look up local images.
+ image, imageName, err := r.LookupImage(name, nil)
+ if err != nil {
+ return err
+ }
+ // Make sure to filter duplicates purely based on the resolved
+ // name.
+ if _, exists := visitedNames[imageName]; exists {
+ continue
+ }
+ visitedNames[imageName] = true
+ // Extract and assemble the data.
+ local, exists := localImages[image.ID()]
+ if !exists {
+ local = &localImage{image: image}
+ orderedIDs = append(orderedIDs, image.ID())
+ }
+ // Add the tag if the locally resolved name is properly tagged
+ // (which it should unless we looked it up by ID).
+ named, err := reference.ParseNamed(imageName)
+ if err == nil {
+ tagged, withTag := named.(reference.NamedTagged)
+ if withTag {
+ local.tags = append(local.tags, tagged)
+ }
+ }
+ localImages[image.ID()] = local
+ }
+
+ writer, err := dockerArchiveTransport.NewWriter(r.systemContextCopy(), path)
+ if err != nil {
+ return err
+ }
+ defer writer.Close()
+
+ for _, id := range orderedIDs {
+ local, exists := localImages[id]
+ if !exists {
+ return errors.Errorf("internal error: saveDockerArchive: ID %s not found in local map", id)
+ }
+
+ copyOpts := options.CopyOptions
+ copyOpts.dockerArchiveAdditionalTags = local.tags
+
+ c, err := r.newCopier(&copyOpts)
+ if err != nil {
+ return err
+ }
+ // NOTE(review): this defer runs at function return, not at the
+ // end of each loop iteration, so copiers accumulate until
+ // saveDockerArchive exits.
+ defer c.close()
+
+ destRef, err := writer.NewReference(nil)
+ if err != nil {
+ return err
+ }
+
+ srcRef, err := local.image.StorageReference()
+ if err != nil {
+ return err
+ }
+
+ if _, err := c.copy(ctx, srcRef, destRef); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/containers/common/libimage/search.go b/vendor/github.com/containers/common/libimage/search.go
new file mode 100644
index 000000000..b36b6d2a3
--- /dev/null
+++ b/vendor/github.com/containers/common/libimage/search.go
@@ -0,0 +1,307 @@
+package libimage
+
+import (
+ "context"
+ "fmt"
+ "strconv"
+ "strings"
+ "sync"
+
+ dockerTransport "github.com/containers/image/v5/docker"
+ "github.com/containers/image/v5/pkg/sysregistriesv2"
+ "github.com/containers/image/v5/transports/alltransports"
+ "github.com/containers/image/v5/types"
+ "github.com/hashicorp/go-multierror"
+ "github.com/pkg/errors"
+ "github.com/sirupsen/logrus"
+ "golang.org/x/sync/semaphore"
+)
+
+const (
+ searchTruncLength = 44
+ searchMaxQueries = 25
+ // Let's follow Firefox by limiting parallel downloads to 6. We do the
+ // same when pulling images in c/image.
+ searchMaxParallel = int64(6)
+)
+
+// SearchResult is holding image-search related data.
+type SearchResult struct {
+ // Index is the image index (e.g., "docker.io" or "quay.io")
+ Index string
+ // Name is the canonical name of the image (e.g., "docker.io/library/alpine").
+ Name string
+ // Description of the image.
+ Description string
+ // Stars is the number of stars of the image.
+ Stars int
+ // Official indicates if it's an official image.
+ Official string
+ // Automated indicates if the image was created by an automated build.
+ Automated string
+ // Tag is the image tag
+ Tag string
+}
+
+// SearchOptions customize searching images.
+type SearchOptions struct {
+ // Filter allows to filter the results.
+ Filter SearchFilter
+ // Limit limits the number of queries per index (default: 25). Must be
+ // greater than 0 to overwrite the default value.
+ Limit int
+ // NoTrunc avoids the output to be truncated.
+ NoTrunc bool
+ // Authfile is the path to the authentication file.
+ Authfile string
+ // InsecureSkipTLSVerify allows to skip TLS verification.
+ InsecureSkipTLSVerify types.OptionalBool
+ // ListTags returns the search result with available tags
+ ListTags bool
+}
+
+// SearchFilter allows filtering images while searching.
+type SearchFilter struct {
+ // Stars describes the minimal number of stars of an image.
+ Stars int
+ // IsAutomated decides if only images from automated builds are displayed.
+ IsAutomated types.OptionalBool
+ // IsOfficial decides if only official images are displayed.
+ IsOfficial types.OptionalBool
+}
+
+// ParseSearchFilter turns the filter into a SearchFilter that can be used for
+// searching images.
+func ParseSearchFilter(filter []string) (*SearchFilter, error) {
+ sFilter := new(SearchFilter)
+ for _, f := range filter {
+ arr := strings.SplitN(f, "=", 2)
+ switch arr[0] {
+ case "stars":
+ if len(arr) < 2 {
+ return nil, errors.Errorf("invalid `stars` filter %q, should be stars=<value>", filter)
+ }
+ stars, err := strconv.Atoi(arr[1])
+ if err != nil {
+ return nil, errors.Wrapf(err, "incorrect value type for stars filter")
+ }
+ sFilter.Stars = stars
+ case "is-automated":
+ if len(arr) == 2 && arr[1] == "false" {
+ sFilter.IsAutomated = types.OptionalBoolFalse
+ } else {
+ sFilter.IsAutomated = types.OptionalBoolTrue
+ }
+ case "is-official":
+ if len(arr) == 2 && arr[1] == "false" {
+ sFilter.IsOfficial = types.OptionalBoolFalse
+ } else {
+ sFilter.IsOfficial = types.OptionalBoolTrue
+ }
+ default:
+ return nil, errors.Errorf("invalid filter type %q", f)
+ }
+ }
+ return sFilter, nil
+}
+
+func (r *Runtime) Search(ctx context.Context, term string, options *SearchOptions) ([]SearchResult, error) {
+ if options == nil {
+ options = &SearchOptions{}
+ }
+
+ var searchRegistries []string
+
+ // Try to extract a registry from the specified search term. We
+ // consider everything before the first slash to be the registry. Note
+ // that we cannot use the reference parser from the containers/image
+ // library as the search term may contain arbitrary input such as
+ // wildcards. See bugzilla.redhat.com/show_bug.cgi?id=1846629.
+ if spl := strings.SplitN(term, "/", 2); len(spl) > 1 {
+ searchRegistries = append(searchRegistries, spl[0])
+ term = spl[1]
+ } else {
+ regs, err := sysregistriesv2.UnqualifiedSearchRegistries(r.systemContextCopy())
+ if err != nil {
+ return nil, err
+ }
+ searchRegistries = regs
+ }
+
+ logrus.Debugf("Searching images matching term %s at the following registries %s", term, searchRegistries)
+
+ // searchOutputData is used as a return value for searching in parallel.
+ type searchOutputData struct {
+ data []SearchResult
+ err error
+ }
+
+ sem := semaphore.NewWeighted(searchMaxParallel)
+ wg := sync.WaitGroup{}
+ wg.Add(len(searchRegistries))
+ data := make([]searchOutputData, len(searchRegistries))
+
+ for i := range searchRegistries {
+ if err := sem.Acquire(ctx, 1); err != nil {
+ return nil, err
+ }
+ index := i
+ go func() {
+ defer sem.Release(1)
+ defer wg.Done()
+ searchOutput, err := r.searchImageInRegistry(ctx, term, searchRegistries[index], options)
+ data[index] = searchOutputData{data: searchOutput, err: err}
+ }()
+ }
+
+ wg.Wait()
+ results := []SearchResult{}
+ var multiErr error
+ for _, d := range data {
+ if d.err != nil {
+ multiErr = multierror.Append(multiErr, d.err)
+ continue
+ }
+ results = append(results, d.data...)
+ }
+
+ // Optimistically assume that one successfully searched registry
+ // includes what the user is looking for.
+ if len(results) > 0 {
+ return results, nil
+ }
+ return results, multiErr
+}
+
+func (r *Runtime) searchImageInRegistry(ctx context.Context, term, registry string, options *SearchOptions) ([]SearchResult, error) {
+ // Max number of queries by default is 25
+ limit := searchMaxQueries
+ if options.Limit > 0 {
+ limit = options.Limit
+ }
+
+ sys := r.systemContextCopy()
+ if options.InsecureSkipTLSVerify != types.OptionalBoolUndefined {
+ sys.DockerInsecureSkipTLSVerify = options.InsecureSkipTLSVerify
+ }
+
+ if options.ListTags {
+ results, err := searchRepositoryTags(ctx, sys, registry, term, options)
+ if err != nil {
+ return []SearchResult{}, err
+ }
+ return results, nil
+ }
+
+ results, err := dockerTransport.SearchRegistry(ctx, sys, registry, term, limit)
+ if err != nil {
+ return []SearchResult{}, err
+ }
+ index := registry
+ arr := strings.Split(registry, ".")
+ if len(arr) > 2 {
+ index = strings.Join(arr[len(arr)-2:], ".")
+ }
+
+ // limit is the number of results to output
+ // if the total number of results is less than the limit, output all
+ // if the limit has been set by the user, output those number of queries
+ limit = searchMaxQueries
+ if len(results) < limit {
+ limit = len(results)
+ }
+ if options.Limit != 0 {
+ limit = len(results)
+ if options.Limit < len(results) {
+ limit = options.Limit
+ }
+ }
+
+ paramsArr := []SearchResult{}
+ for i := 0; i < limit; i++ {
+ // Check whether query matches filters
+ if !(options.Filter.matchesAutomatedFilter(results[i]) && options.Filter.matchesOfficialFilter(results[i]) && options.Filter.matchesStarFilter(results[i])) {
+ continue
+ }
+ official := ""
+ if results[i].IsOfficial {
+ official = "[OK]"
+ }
+ automated := ""
+ if results[i].IsAutomated {
+ automated = "[OK]"
+ }
+ description := strings.ReplaceAll(results[i].Description, "\n", " ")
+ if len(description) > 44 && !options.NoTrunc {
+ description = description[:searchTruncLength] + "..."
+ }
+ name := registry + "/" + results[i].Name
+ if index == "docker.io" && !strings.Contains(results[i].Name, "/") {
+ name = index + "/library/" + results[i].Name
+ }
+ params := SearchResult{
+ Index: index,
+ Name: name,
+ Description: description,
+ Official: official,
+ Automated: automated,
+ Stars: results[i].StarCount,
+ }
+ paramsArr = append(paramsArr, params)
+ }
+ return paramsArr, nil
+}
+
+func searchRepositoryTags(ctx context.Context, sys *types.SystemContext, registry, term string, options *SearchOptions) ([]SearchResult, error) {
+ dockerPrefix := "docker://"
+ imageRef, err := alltransports.ParseImageName(fmt.Sprintf("%s/%s", registry, term))
+ if err == nil && imageRef.Transport().Name() != dockerTransport.Transport.Name() {
+ return nil, errors.Errorf("reference %q must be a docker reference", term)
+ } else if err != nil {
+ imageRef, err = alltransports.ParseImageName(fmt.Sprintf("%s%s", dockerPrefix, fmt.Sprintf("%s/%s", registry, term)))
+ if err != nil {
+ return nil, errors.Errorf("reference %q must be a docker reference", term)
+ }
+ }
+ tags, err := dockerTransport.GetRepositoryTags(ctx, sys, imageRef)
+ if err != nil {
+ return nil, errors.Errorf("error getting repository tags: %v", err)
+ }
+ limit := searchMaxQueries
+ if len(tags) < limit {
+ limit = len(tags)
+ }
+ if options.Limit != 0 {
+ limit = len(tags)
+ if options.Limit < limit {
+ limit = options.Limit
+ }
+ }
+ paramsArr := []SearchResult{}
+ for i := 0; i < limit; i++ {
+ params := SearchResult{
+ Name: imageRef.DockerReference().Name(),
+ Tag: tags[i],
+ }
+ paramsArr = append(paramsArr, params)
+ }
+ return paramsArr, nil
+}
+
+func (f *SearchFilter) matchesStarFilter(result dockerTransport.SearchResult) bool {
+ return result.StarCount >= f.Stars
+}
+
+func (f *SearchFilter) matchesAutomatedFilter(result dockerTransport.SearchResult) bool {
+ if f.IsAutomated != types.OptionalBoolUndefined {
+ return result.IsAutomated == (f.IsAutomated == types.OptionalBoolTrue)
+ }
+ return true
+}
+
+func (f *SearchFilter) matchesOfficialFilter(result dockerTransport.SearchResult) bool {
+ if f.IsOfficial != types.OptionalBoolUndefined {
+ return result.IsOfficial == (f.IsOfficial == types.OptionalBoolTrue)
+ }
+ return true
+}
diff --git a/vendor/github.com/containers/common/pkg/config/config.go b/vendor/github.com/containers/common/pkg/config/config.go
index 1531422cd..371dd3667 100644
--- a/vendor/github.com/containers/common/pkg/config/config.go
+++ b/vendor/github.com/containers/common/pkg/config/config.go
@@ -47,18 +47,6 @@ const (
BoltDBStateStore RuntimeStateStore = iota
)
-// PullPolicy whether to pull new image
-type PullPolicy int
-
-const (
- // PullImageAlways always try to pull new image when create or run
- PullImageAlways PullPolicy = iota
- // PullImageMissing pulls image if it is not locally
- PullImageMissing
- // PullImageNever will never pull new image
- PullImageNever
-)
-
// Config contains configuration options for container tools
type Config struct {
// Containers specify settings that configure how containers will run ont the system
@@ -700,23 +688,6 @@ func (c *NetworkConfig) Validate() error {
return errors.Errorf("invalid cni_plugin_dirs: %s", strings.Join(c.CNIPluginDirs, ","))
}
-// ValidatePullPolicy check if the pullPolicy from CLI is valid and returns the valid enum type
-// if the value from CLI or containers.conf is invalid returns the error
-func ValidatePullPolicy(pullPolicy string) (PullPolicy, error) {
- switch strings.ToLower(pullPolicy) {
- case "always":
- return PullImageAlways, nil
- case "missing", "ifnotpresent":
- return PullImageMissing, nil
- case "never":
- return PullImageNever, nil
- case "":
- return PullImageMissing, nil
- default:
- return PullImageMissing, errors.Errorf("invalid pull policy %q", pullPolicy)
- }
-}
-
// FindConmon iterates over (*Config).ConmonPath and returns the path
// to first (version) matching conmon binary. If non is found, we try
// to do a path lookup of "conmon".
diff --git a/vendor/github.com/containers/common/pkg/config/pull_policy.go b/vendor/github.com/containers/common/pkg/config/pull_policy.go
new file mode 100644
index 000000000..7c32dd660
--- /dev/null
+++ b/vendor/github.com/containers/common/pkg/config/pull_policy.go
@@ -0,0 +1,95 @@
+package config
+
+import (
+ "fmt"
+
+ "github.com/pkg/errors"
+)
+
+// PullPolicy determines how and which images are being pulled from a container
+// registry (i.e., docker transport only).
+//
+// Supported string values are:
+// * "always" <-> PullPolicyAlways
+// * "missing" <-> PullPolicyMissing
+// * "newer" <-> PullPolicyNewer
+// * "never" <-> PullPolicyNever
+type PullPolicy int
+
+const (
+ // Always pull the image.
+ PullPolicyAlways PullPolicy = iota
+ // Pull the image only if it could not be found in the local containers
+ // storage.
+ PullPolicyMissing
+ // Never pull the image but use the one from the local containers
+ // storage.
+ PullPolicyNever
+ // Pull if the image on the registry is newer than the one in the local
+ // containers storage. An image is considered to be newer when the
+ // digests are different. Comparing the time stamps is prone to
+ // errors.
+ PullPolicyNewer
+
+ // Ideally this should be the first `iota` but backwards compatibility
+ // prevents us from changing the values.
+ PullPolicyUnsupported = -1
+)
+
+// String converts a PullPolicy into a string.
+//
+// Supported string values are:
+// * "always" <-> PullPolicyAlways
+// * "missing" <-> PullPolicyMissing
+// * "newer" <-> PullPolicyNewer
+// * "never" <-> PullPolicyNever
+func (p PullPolicy) String() string {
+ switch p {
+ case PullPolicyAlways:
+ return "always"
+ case PullPolicyMissing:
+ return "missing"
+ case PullPolicyNewer:
+ return "newer"
+ case PullPolicyNever:
+ return "never"
+ }
+ return fmt.Sprintf("unrecognized policy %d", p)
+}
+
+// Validate returns an error if the pull policy is not supported.
+func (p PullPolicy) Validate() error {
+ switch p {
+ case PullPolicyAlways, PullPolicyMissing, PullPolicyNewer, PullPolicyNever:
+ return nil
+ default:
+ return errors.Errorf("unsupported pull policy %d", p)
+ }
+}
+
+// ParsePullPolicy parses the string into a pull policy.
+//
+// Supported string values are:
+// * "always" <-> PullPolicyAlways
+// * "missing" <-> PullPolicyMissing (also "ifnotpresent" and "")
+// * "newer" <-> PullPolicyNewer (also "ifnewer")
+// * "never" <-> PullPolicyNever
+func ParsePullPolicy(s string) (PullPolicy, error) {
+ switch s {
+ case "always":
+ return PullPolicyAlways, nil
+ case "missing", "ifnotpresent", "":
+ return PullPolicyMissing, nil
+ case "newer", "ifnewer":
+ return PullPolicyNewer, nil
+ case "never":
+ return PullPolicyNever, nil
+ default:
+ return PullPolicyUnsupported, errors.Errorf("unsupported pull policy %q", s)
+ }
+}
+
+// Deprecated: please use `ParsePullPolicy` instead.
+func ValidatePullPolicy(s string) (PullPolicy, error) {
+ return ParsePullPolicy(s)
+}
diff --git a/vendor/github.com/containers/common/pkg/filters/filters.go b/vendor/github.com/containers/common/pkg/filters/filters.go
new file mode 100644
index 000000000..53f420db2
--- /dev/null
+++ b/vendor/github.com/containers/common/pkg/filters/filters.go
@@ -0,0 +1,118 @@
+package filters
+
+import (
+ "encoding/json"
+ "fmt"
+ "net/http"
+ "strings"
+ "time"
+
+ "github.com/containers/common/pkg/timetype"
+ "github.com/pkg/errors"
+)
+
+// ComputeUntilTimestamp extracts until timestamp from filters
+func ComputeUntilTimestamp(filterValues []string) (time.Time, error) {
+ invalid := time.Time{}
+ if len(filterValues) != 1 {
+ return invalid, errors.Errorf("specify exactly one timestamp for until")
+ }
+ ts, err := timetype.GetTimestamp(filterValues[0], time.Now())
+ if err != nil {
+ return invalid, err
+ }
+ seconds, nanoseconds, err := timetype.ParseTimestamps(ts, 0)
+ if err != nil {
+ return invalid, err
+ }
+ return time.Unix(seconds, nanoseconds), nil
+}
+
+// FiltersFromRequest extracts the "filters" parameter from the specified
+// http.Request. The parameter can either be a `map[string][]string` as done
+// in new versions of Docker and libpod, or a `map[string]map[string]bool` as
+// done in older versions of Docker. We have to do a bit of Yoga to support
+// both - just as Docker does as well.
+//
+// Please refer to https://github.com/containers/podman/issues/6899 for some
+// background.
+func FiltersFromRequest(r *http.Request) ([]string, error) {
+ var (
+ compatFilters map[string]map[string]bool
+ filters map[string][]string
+ libpodFilters []string
+ raw []byte
+ )
+
+ if _, found := r.URL.Query()["filters"]; found {
+ raw = []byte(r.Form.Get("filters"))
+ } else if _, found := r.URL.Query()["Filters"]; found {
+ raw = []byte(r.Form.Get("Filters"))
+ } else {
+ return []string{}, nil
+ }
+
+ // Backwards compat with older versions of Docker.
+ if err := json.Unmarshal(raw, &compatFilters); err == nil {
+ for filterKey, filterMap := range compatFilters {
+ for filterValue, toAdd := range filterMap {
+ if toAdd {
+ libpodFilters = append(libpodFilters, fmt.Sprintf("%s=%s", filterKey, filterValue))
+ }
+ }
+ }
+ return libpodFilters, nil
+ }
+
+ if err := json.Unmarshal(raw, &filters); err != nil {
+ return nil, err
+ }
+
+ for filterKey, filterSlice := range filters {
+ f := filterKey
+ for _, filterValue := range filterSlice {
+ f += "=" + filterValue
+ }
+ libpodFilters = append(libpodFilters, f)
+ }
+
+ return libpodFilters, nil
+}
+
+// PrepareFilters prepares a map[string][]string of filters to be later searched
+// in libpod and compat API to get the desired filters
+func PrepareFilters(r *http.Request) (map[string][]string, error) {
+ filtersList, err := FiltersFromRequest(r)
+ if err != nil {
+ return nil, err
+ }
+ filterMap := map[string][]string{}
+ for _, filter := range filtersList {
+ split := strings.SplitN(filter, "=", 2)
+ if len(split) > 1 {
+ filterMap[split[0]] = append(filterMap[split[0]], split[1])
+ }
+ }
+ return filterMap, nil
+}
+
+// MatchLabelFilters matches labels and returns true if they are valid
+func MatchLabelFilters(filterValues []string, labels map[string]string) bool {
+outer:
+ for _, filterValue := range filterValues {
+ filterArray := strings.SplitN(filterValue, "=", 2)
+ filterKey := filterArray[0]
+ if len(filterArray) > 1 {
+ filterValue = filterArray[1]
+ } else {
+ filterValue = ""
+ }
+ for labelKey, labelValue := range labels {
+ if labelKey == filterKey && (filterValue == "" || labelValue == filterValue) {
+ continue outer
+ }
+ }
+ return false
+ }
+ return true
+}
diff --git a/vendor/github.com/containers/buildah/pkg/manifests/errors.go b/vendor/github.com/containers/common/pkg/manifests/errors.go
index 8398d7efc..8398d7efc 100644
--- a/vendor/github.com/containers/buildah/pkg/manifests/errors.go
+++ b/vendor/github.com/containers/common/pkg/manifests/errors.go
diff --git a/vendor/github.com/containers/buildah/pkg/manifests/manifests.go b/vendor/github.com/containers/common/pkg/manifests/manifests.go
index ea9495ee7..ea9495ee7 100644
--- a/vendor/github.com/containers/buildah/pkg/manifests/manifests.go
+++ b/vendor/github.com/containers/common/pkg/manifests/manifests.go
diff --git a/vendor/github.com/containers/common/pkg/signal/signal_common.go b/vendor/github.com/containers/common/pkg/signal/signal_common.go
new file mode 100644
index 000000000..7c2662909
--- /dev/null
+++ b/vendor/github.com/containers/common/pkg/signal/signal_common.go
@@ -0,0 +1,41 @@
+package signal
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+ "syscall"
+)
+
+// ParseSignal translates a string to a valid syscall signal.
+// It returns an error if the signal map doesn't include the given signal.
+func ParseSignal(rawSignal string) (syscall.Signal, error) {
+ s, err := strconv.Atoi(rawSignal)
+ if err == nil {
+ if s == 0 {
+ return -1, fmt.Errorf("invalid signal: %s", rawSignal)
+ }
+ return syscall.Signal(s), nil
+ }
+ sig, ok := signalMap[strings.TrimPrefix(strings.ToUpper(rawSignal), "SIG")]
+ if !ok {
+ return -1, fmt.Errorf("invalid signal: %s", rawSignal)
+ }
+ return sig, nil
+}
+
+// ParseSignalNameOrNumber translates a string to a valid syscall signal. Input
+// can be a name or number representation i.e. "KILL" "9"
+func ParseSignalNameOrNumber(rawSignal string) (syscall.Signal, error) {
+ basename := strings.TrimPrefix(rawSignal, "-")
+ s, err := ParseSignal(basename)
+ if err == nil {
+ return s, nil
+ }
+ for k, v := range signalMap {
+ if strings.EqualFold(k, basename) {
+ return v, nil
+ }
+ }
+ return -1, fmt.Errorf("invalid signal: %s", basename)
+}
diff --git a/vendor/github.com/containers/common/pkg/signal/signal_linux.go b/vendor/github.com/containers/common/pkg/signal/signal_linux.go
new file mode 100644
index 000000000..305b9d21f
--- /dev/null
+++ b/vendor/github.com/containers/common/pkg/signal/signal_linux.go
@@ -0,0 +1,108 @@
+// +build linux
+// +build !mips,!mipsle,!mips64,!mips64le
+
+// Signal handling for Linux only.
+package signal
+
+// Copyright 2013-2018 Docker, Inc.
+
+// NOTE: this package has originally been copied from github.com/docker/docker.
+
+import (
+ "os"
+ "os/signal"
+ "syscall"
+
+ "golang.org/x/sys/unix"
+)
+
+const (
+ sigrtmin = 34
+ sigrtmax = 64
+
+ SIGWINCH = syscall.SIGWINCH // For cross-compilation with Windows
+)
+
+// signalMap is a map of Linux signals.
+var signalMap = map[string]syscall.Signal{
+ "ABRT": unix.SIGABRT,
+ "ALRM": unix.SIGALRM,
+ "BUS": unix.SIGBUS,
+ "CHLD": unix.SIGCHLD,
+ "CLD": unix.SIGCLD,
+ "CONT": unix.SIGCONT,
+ "FPE": unix.SIGFPE,
+ "HUP": unix.SIGHUP,
+ "ILL": unix.SIGILL,
+ "INT": unix.SIGINT,
+ "IO": unix.SIGIO,
+ "IOT": unix.SIGIOT,
+ "KILL": unix.SIGKILL,
+ "PIPE": unix.SIGPIPE,
+ "POLL": unix.SIGPOLL,
+ "PROF": unix.SIGPROF,
+ "PWR": unix.SIGPWR,
+ "QUIT": unix.SIGQUIT,
+ "SEGV": unix.SIGSEGV,
+ "STKFLT": unix.SIGSTKFLT,
+ "STOP": unix.SIGSTOP,
+ "SYS": unix.SIGSYS,
+ "TERM": unix.SIGTERM,
+ "TRAP": unix.SIGTRAP,
+ "TSTP": unix.SIGTSTP,
+ "TTIN": unix.SIGTTIN,
+ "TTOU": unix.SIGTTOU,
+ "URG": unix.SIGURG,
+ "USR1": unix.SIGUSR1,
+ "USR2": unix.SIGUSR2,
+ "VTALRM": unix.SIGVTALRM,
+ "WINCH": unix.SIGWINCH,
+ "XCPU": unix.SIGXCPU,
+ "XFSZ": unix.SIGXFSZ,
+ "RTMIN": sigrtmin,
+ "RTMIN+1": sigrtmin + 1,
+ "RTMIN+2": sigrtmin + 2,
+ "RTMIN+3": sigrtmin + 3,
+ "RTMIN+4": sigrtmin + 4,
+ "RTMIN+5": sigrtmin + 5,
+ "RTMIN+6": sigrtmin + 6,
+ "RTMIN+7": sigrtmin + 7,
+ "RTMIN+8": sigrtmin + 8,
+ "RTMIN+9": sigrtmin + 9,
+ "RTMIN+10": sigrtmin + 10,
+ "RTMIN+11": sigrtmin + 11,
+ "RTMIN+12": sigrtmin + 12,
+ "RTMIN+13": sigrtmin + 13,
+ "RTMIN+14": sigrtmin + 14,
+ "RTMIN+15": sigrtmin + 15,
+ "RTMAX-14": sigrtmax - 14,
+ "RTMAX-13": sigrtmax - 13,
+ "RTMAX-12": sigrtmax - 12,
+ "RTMAX-11": sigrtmax - 11,
+ "RTMAX-10": sigrtmax - 10,
+ "RTMAX-9": sigrtmax - 9,
+ "RTMAX-8": sigrtmax - 8,
+ "RTMAX-7": sigrtmax - 7,
+ "RTMAX-6": sigrtmax - 6,
+ "RTMAX-5": sigrtmax - 5,
+ "RTMAX-4": sigrtmax - 4,
+ "RTMAX-3": sigrtmax - 3,
+ "RTMAX-2": sigrtmax - 2,
+ "RTMAX-1": sigrtmax - 1,
+ "RTMAX": sigrtmax,
+}
+
+// CatchAll catches all signals and relays them to the specified channel.
+func CatchAll(sigc chan os.Signal) {
+ handledSigs := make([]os.Signal, 0, len(signalMap))
+ for _, s := range signalMap {
+ handledSigs = append(handledSigs, s)
+ }
+ signal.Notify(sigc, handledSigs...)
+}
+
+// StopCatch stops catching the signals and closes the specified channel.
+func StopCatch(sigc chan os.Signal) {
+ signal.Stop(sigc)
+ close(sigc)
+}
diff --git a/vendor/github.com/containers/common/pkg/signal/signal_linux_mipsx.go b/vendor/github.com/containers/common/pkg/signal/signal_linux_mipsx.go
new file mode 100644
index 000000000..45c9d5af1
--- /dev/null
+++ b/vendor/github.com/containers/common/pkg/signal/signal_linux_mipsx.go
@@ -0,0 +1,108 @@
+// +build linux
+// +build mips mipsle mips64 mips64le
+
+// Special signal handling for mips architecture
+package signal
+
+// Copyright 2013-2018 Docker, Inc.
+
+// NOTE: this package has originally been copied from github.com/docker/docker.
+
+import (
+ "os"
+ "os/signal"
+ "syscall"
+
+ "golang.org/x/sys/unix"
+)
+
+const (
+ sigrtmin = 34
+ sigrtmax = 127
+
+ SIGWINCH = syscall.SIGWINCH
+)
+
+// signalMap is a map of Linux signals.
+var signalMap = map[string]syscall.Signal{
+ "ABRT": unix.SIGABRT,
+ "ALRM": unix.SIGALRM,
+ "BUS": unix.SIGBUS,
+ "CHLD": unix.SIGCHLD,
+ "CLD": unix.SIGCLD,
+ "CONT": unix.SIGCONT,
+ "FPE": unix.SIGFPE,
+ "HUP": unix.SIGHUP,
+ "ILL": unix.SIGILL,
+ "INT": unix.SIGINT,
+ "IO": unix.SIGIO,
+ "IOT": unix.SIGIOT,
+ "KILL": unix.SIGKILL,
+ "PIPE": unix.SIGPIPE,
+ "POLL": unix.SIGPOLL,
+ "PROF": unix.SIGPROF,
+ "PWR": unix.SIGPWR,
+ "QUIT": unix.SIGQUIT,
+ "SEGV": unix.SIGSEGV,
+ "EMT": unix.SIGEMT,
+ "STOP": unix.SIGSTOP,
+ "SYS": unix.SIGSYS,
+ "TERM": unix.SIGTERM,
+ "TRAP": unix.SIGTRAP,
+ "TSTP": unix.SIGTSTP,
+ "TTIN": unix.SIGTTIN,
+ "TTOU": unix.SIGTTOU,
+ "URG": unix.SIGURG,
+ "USR1": unix.SIGUSR1,
+ "USR2": unix.SIGUSR2,
+ "VTALRM": unix.SIGVTALRM,
+ "WINCH": unix.SIGWINCH,
+ "XCPU": unix.SIGXCPU,
+ "XFSZ": unix.SIGXFSZ,
+ "RTMIN": sigrtmin,
+ "RTMIN+1": sigrtmin + 1,
+ "RTMIN+2": sigrtmin + 2,
+ "RTMIN+3": sigrtmin + 3,
+ "RTMIN+4": sigrtmin + 4,
+ "RTMIN+5": sigrtmin + 5,
+ "RTMIN+6": sigrtmin + 6,
+ "RTMIN+7": sigrtmin + 7,
+ "RTMIN+8": sigrtmin + 8,
+ "RTMIN+9": sigrtmin + 9,
+ "RTMIN+10": sigrtmin + 10,
+ "RTMIN+11": sigrtmin + 11,
+ "RTMIN+12": sigrtmin + 12,
+ "RTMIN+13": sigrtmin + 13,
+ "RTMIN+14": sigrtmin + 14,
+ "RTMIN+15": sigrtmin + 15,
+ "RTMAX-14": sigrtmax - 14,
+ "RTMAX-13": sigrtmax - 13,
+ "RTMAX-12": sigrtmax - 12,
+ "RTMAX-11": sigrtmax - 11,
+ "RTMAX-10": sigrtmax - 10,
+ "RTMAX-9": sigrtmax - 9,
+ "RTMAX-8": sigrtmax - 8,
+ "RTMAX-7": sigrtmax - 7,
+ "RTMAX-6": sigrtmax - 6,
+ "RTMAX-5": sigrtmax - 5,
+ "RTMAX-4": sigrtmax - 4,
+ "RTMAX-3": sigrtmax - 3,
+ "RTMAX-2": sigrtmax - 2,
+ "RTMAX-1": sigrtmax - 1,
+ "RTMAX": sigrtmax,
+}
+
+// CatchAll catches all signals and relays them to the specified channel.
+func CatchAll(sigc chan os.Signal) {
+ handledSigs := make([]os.Signal, 0, len(signalMap))
+ for _, s := range signalMap {
+ handledSigs = append(handledSigs, s)
+ }
+ signal.Notify(sigc, handledSigs...)
+}
+
+// StopCatch stops catching the signals and closes the specified channel.
+func StopCatch(sigc chan os.Signal) {
+ signal.Stop(sigc)
+ close(sigc)
+}
diff --git a/vendor/github.com/containers/common/pkg/signal/signal_unsupported.go b/vendor/github.com/containers/common/pkg/signal/signal_unsupported.go
new file mode 100644
index 000000000..9d1733c02
--- /dev/null
+++ b/vendor/github.com/containers/common/pkg/signal/signal_unsupported.go
@@ -0,0 +1,99 @@
+// +build !linux
+
+// Fallback Linux signal definitions for non-Linux platforms.
+package signal
+
+import (
+ "os"
+ "syscall"
+)
+
+const (
+ sigrtmin = 34
+ sigrtmax = 64
+
+ SIGWINCH = syscall.Signal(0xff)
+)
+
+// signalMap is a map of Linux signals.
+// These constants are sourced from the Linux version of golang.org/x/sys/unix
+// (I don't see much risk of this changing).
+// This should work as long as Podman only runs containers on Linux, which seems
+// a safe assumption for now.
+var signalMap = map[string]syscall.Signal{
+ "ABRT": syscall.Signal(0x6),
+ "ALRM": syscall.Signal(0xe),
+ "BUS": syscall.Signal(0x7),
+ "CHLD": syscall.Signal(0x11),
+ "CLD": syscall.Signal(0x11),
+ "CONT": syscall.Signal(0x12),
+ "FPE": syscall.Signal(0x8),
+ "HUP": syscall.Signal(0x1),
+ "ILL": syscall.Signal(0x4),
+ "INT": syscall.Signal(0x2),
+ "IO": syscall.Signal(0x1d),
+ "IOT": syscall.Signal(0x6),
+ "KILL": syscall.Signal(0x9),
+ "PIPE": syscall.Signal(0xd),
+ "POLL": syscall.Signal(0x1d),
+ "PROF": syscall.Signal(0x1b),
+ "PWR": syscall.Signal(0x1e),
+ "QUIT": syscall.Signal(0x3),
+ "SEGV": syscall.Signal(0xb),
+ "STKFLT": syscall.Signal(0x10),
+ "STOP": syscall.Signal(0x13),
+ "SYS": syscall.Signal(0x1f),
+ "TERM": syscall.Signal(0xf),
+ "TRAP": syscall.Signal(0x5),
+ "TSTP": syscall.Signal(0x14),
+ "TTIN": syscall.Signal(0x15),
+ "TTOU": syscall.Signal(0x16),
+ "URG": syscall.Signal(0x17),
+ "USR1": syscall.Signal(0xa),
+ "USR2": syscall.Signal(0xc),
+ "VTALRM": syscall.Signal(0x1a),
+ "WINCH": syscall.Signal(0x1c),
+ "XCPU": syscall.Signal(0x18),
+ "XFSZ": syscall.Signal(0x19),
+ "RTMIN": sigrtmin,
+ "RTMIN+1": sigrtmin + 1,
+ "RTMIN+2": sigrtmin + 2,
+ "RTMIN+3": sigrtmin + 3,
+ "RTMIN+4": sigrtmin + 4,
+ "RTMIN+5": sigrtmin + 5,
+ "RTMIN+6": sigrtmin + 6,
+ "RTMIN+7": sigrtmin + 7,
+ "RTMIN+8": sigrtmin + 8,
+ "RTMIN+9": sigrtmin + 9,
+ "RTMIN+10": sigrtmin + 10,
+ "RTMIN+11": sigrtmin + 11,
+ "RTMIN+12": sigrtmin + 12,
+ "RTMIN+13": sigrtmin + 13,
+ "RTMIN+14": sigrtmin + 14,
+ "RTMIN+15": sigrtmin + 15,
+ "RTMAX-14": sigrtmax - 14,
+ "RTMAX-13": sigrtmax - 13,
+ "RTMAX-12": sigrtmax - 12,
+ "RTMAX-11": sigrtmax - 11,
+ "RTMAX-10": sigrtmax - 10,
+ "RTMAX-9": sigrtmax - 9,
+ "RTMAX-8": sigrtmax - 8,
+ "RTMAX-7": sigrtmax - 7,
+ "RTMAX-6": sigrtmax - 6,
+ "RTMAX-5": sigrtmax - 5,
+ "RTMAX-4": sigrtmax - 4,
+ "RTMAX-3": sigrtmax - 3,
+ "RTMAX-2": sigrtmax - 2,
+ "RTMAX-1": sigrtmax - 1,
+ "RTMAX": sigrtmax,
+}
+
+// CatchAll catches all signals and relays them to the specified channel.
+func CatchAll(sigc chan os.Signal) {
+ panic("Unsupported on non-linux platforms")
+}
+
+// StopCatch stops catching the signals and closes the specified channel.
+func StopCatch(sigc chan os.Signal) {
+ panic("Unsupported on non-linux platforms")
+}
diff --git a/vendor/github.com/containers/buildah/pkg/supplemented/errors.go b/vendor/github.com/containers/common/pkg/supplemented/errors.go
index 6de679b50..a031951f1 100644
--- a/vendor/github.com/containers/buildah/pkg/supplemented/errors.go
+++ b/vendor/github.com/containers/common/pkg/supplemented/errors.go
@@ -3,7 +3,7 @@ package supplemented
import (
"errors"
- "github.com/containers/buildah/pkg/manifests"
+ "github.com/containers/common/pkg/manifests"
)
var (
diff --git a/vendor/github.com/containers/buildah/pkg/supplemented/supplemented.go b/vendor/github.com/containers/common/pkg/supplemented/supplemented.go
index a36c3eda4..a36c3eda4 100644
--- a/vendor/github.com/containers/buildah/pkg/supplemented/supplemented.go
+++ b/vendor/github.com/containers/common/pkg/supplemented/supplemented.go
diff --git a/vendor/github.com/containers/common/pkg/timetype/timestamp.go b/vendor/github.com/containers/common/pkg/timetype/timestamp.go
new file mode 100644
index 000000000..ce2cb64f2
--- /dev/null
+++ b/vendor/github.com/containers/common/pkg/timetype/timestamp.go
@@ -0,0 +1,131 @@
+package timetype
+
+// code adapted from https://github.com/moby/moby/blob/master/api/types/time/timestamp.go
+
+import (
+ "fmt"
+ "math"
+ "strconv"
+ "strings"
+ "time"
+)
+
+// These are additional predefined layouts for use in Time.Format and Time.Parse
+// with --since and --until parameters for `docker logs` and `docker events`
+const (
+ rFC3339Local = "2006-01-02T15:04:05" // RFC3339 with local timezone
+ rFC3339NanoLocal = "2006-01-02T15:04:05.999999999" // RFC3339Nano with local timezone
+ dateWithZone = "2006-01-02Z07:00" // RFC3339 with time at 00:00:00
+ dateLocal = "2006-01-02" // RFC3339 with local timezone and time at 00:00:00
+)
+
+// GetTimestamp tries to parse given string as golang duration,
+// then RFC3339 time and finally as a Unix timestamp. If
+// any of these were successful, it returns a Unix timestamp
+// as string otherwise returns the given value back.
+// In case of duration input, the returned timestamp is computed
+// as the given reference time minus the amount of the duration.
+func GetTimestamp(value string, reference time.Time) (string, error) {
+ if d, err := time.ParseDuration(value); value != "0" && err == nil {
+ return strconv.FormatInt(reference.Add(-d).Unix(), 10), nil
+ }
+
+ var format string
+ // if the string has a Z or a + or three dashes use parse otherwise use parseinlocation
+ parseInLocation := !(strings.ContainsAny(value, "zZ+") || strings.Count(value, "-") == 3)
+
+ if strings.Contains(value, ".") { // nolint:gocritic
+ if parseInLocation {
+ format = rFC3339NanoLocal
+ } else {
+ format = time.RFC3339Nano
+ }
+ } else if strings.Contains(value, "T") {
+ // we want the number of colons in the T portion of the timestamp
+ tcolons := strings.Count(value, ":")
+ // if parseInLocation is off and we have a +/- zone offset (not Z) then
+ // there will be an extra colon in the input for the tz offset subtract that
+ // colon from the tcolons count
+ if !parseInLocation && !strings.ContainsAny(value, "zZ") && tcolons > 0 {
+ tcolons--
+ }
+ if parseInLocation {
+ switch tcolons {
+ case 0:
+ format = "2006-01-02T15"
+ case 1:
+ format = "2006-01-02T15:04"
+ default:
+ format = rFC3339Local
+ }
+ } else {
+ switch tcolons {
+ case 0:
+ format = "2006-01-02T15Z07:00"
+ case 1:
+ format = "2006-01-02T15:04Z07:00"
+ default:
+ format = time.RFC3339
+ }
+ }
+ } else if parseInLocation {
+ format = dateLocal
+ } else {
+ format = dateWithZone
+ }
+
+ var t time.Time
+ var err error
+
+ if parseInLocation {
+ t, err = time.ParseInLocation(format, value, time.FixedZone(reference.Zone()))
+ } else {
+ t, err = time.Parse(format, value)
+ }
+
+ if err != nil {
+ // if there is a `-` then it's an RFC3339 like timestamp
+ if strings.Contains(value, "-") {
+ return "", err // was probably an RFC3339 like timestamp but the parser failed with an error
+ }
+ if _, _, err := parseTimestamp(value); err != nil {
+ return "", fmt.Errorf("failed to parse value as time or duration: %q", value)
+ }
+ return value, nil // unix timestamp in and out case (meaning: the value passed at the command line is already in the right format for passing to the server)
+ }
+
+ return fmt.Sprintf("%d.%09d", t.Unix(), int64(t.Nanosecond())), nil
+}
+
+// ParseTimestamps returns seconds and nanoseconds from a timestamp that has the
+// format "%d.%09d", time.Unix(), int64(time.Nanosecond()))
+// if the incoming nanosecond portion is longer or shorter than 9 digits it is
+// converted to nanoseconds. The expectation is that the seconds and
+// nanoseconds will be used to create a time variable. For example:
+// seconds, nanoseconds, err := ParseTimestamp("1136073600.000000001",0)
+// if err == nil since := time.Unix(seconds, nanoseconds)
+// returns seconds as def(aultSeconds) if value == ""
+func ParseTimestamps(value string, def int64) (secs, nanoSecs int64, err error) {
+ if value == "" {
+ return def, 0, nil
+ }
+ return parseTimestamp(value)
+}
+
+func parseTimestamp(value string) (int64, int64, error) { // nolint:gocritic
+ sa := strings.SplitN(value, ".", 2)
+ s, err := strconv.ParseInt(sa[0], 10, 64)
+ if err != nil {
+ return s, 0, err
+ }
+ if len(sa) != 2 {
+ return s, 0, nil
+ }
+ n, err := strconv.ParseInt(sa[1], 10, 64)
+ if err != nil {
+ return s, n, err
+ }
+ // should already be in nanoseconds but just in case convert n to nanoseconds
+ n = int64(float64(n) * math.Pow(float64(10), float64(9-len(sa[1]))))
+ return s, n, nil
+}
diff --git a/vendor/github.com/containers/common/version/version.go b/vendor/github.com/containers/common/version/version.go
index cb1eb342d..af0a1269e 100644
--- a/vendor/github.com/containers/common/version/version.go
+++ b/vendor/github.com/containers/common/version/version.go
@@ -1,4 +1,4 @@
package version
// Version is the version of the build.
-const Version = "0.37.1"
+const Version = "0.37.2-dev"
diff --git a/vendor/github.com/containers/image/v5/docker/tarfile/dest.go b/vendor/github.com/containers/image/v5/docker/tarfile/dest.go
deleted file mode 100644
index 4f2465cac..000000000
--- a/vendor/github.com/containers/image/v5/docker/tarfile/dest.go
+++ /dev/null
@@ -1,119 +0,0 @@
-package tarfile
-
-import (
- "context"
- "io"
-
- internal "github.com/containers/image/v5/docker/internal/tarfile"
- "github.com/containers/image/v5/docker/reference"
- "github.com/containers/image/v5/types"
- "github.com/opencontainers/go-digest"
-)
-
-// Destination is a partial implementation of types.ImageDestination for writing to an io.Writer.
-type Destination struct {
- internal *internal.Destination
- archive *internal.Writer
-}
-
-// NewDestination returns a tarfile.Destination for the specified io.Writer.
-// Deprecated: please use NewDestinationWithContext instead
-func NewDestination(dest io.Writer, ref reference.NamedTagged) *Destination {
- return NewDestinationWithContext(nil, dest, ref)
-}
-
-// NewDestinationWithContext returns a tarfile.Destination for the specified io.Writer.
-func NewDestinationWithContext(sys *types.SystemContext, dest io.Writer, ref reference.NamedTagged) *Destination {
- archive := internal.NewWriter(dest)
- return &Destination{
- internal: internal.NewDestination(sys, archive, ref),
- archive: archive,
- }
-}
-
-// AddRepoTags adds the specified tags to the destination's repoTags.
-func (d *Destination) AddRepoTags(tags []reference.NamedTagged) {
- d.internal.AddRepoTags(tags)
-}
-
-// SupportedManifestMIMETypes tells which manifest mime types the destination supports
-// If an empty slice or nil it's returned, then any mime type can be tried to upload
-func (d *Destination) SupportedManifestMIMETypes() []string {
- return d.internal.SupportedManifestMIMETypes()
-}
-
-// SupportsSignatures returns an error (to be displayed to the user) if the destination certainly can't store signatures.
-// Note: It is still possible for PutSignatures to fail if SupportsSignatures returns nil.
-func (d *Destination) SupportsSignatures(ctx context.Context) error {
- return d.internal.SupportsSignatures(ctx)
-}
-
-// AcceptsForeignLayerURLs returns false iff foreign layers in manifest should be actually
-// uploaded to the image destination, true otherwise.
-func (d *Destination) AcceptsForeignLayerURLs() bool {
- return d.internal.AcceptsForeignLayerURLs()
-}
-
-// MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime architecture and OS. False otherwise.
-func (d *Destination) MustMatchRuntimeOS() bool {
- return d.internal.MustMatchRuntimeOS()
-}
-
-// IgnoresEmbeddedDockerReference returns true iff the destination does not care about Image.EmbeddedDockerReferenceConflicts(),
-// and would prefer to receive an unmodified manifest instead of one modified for the destination.
-// Does not make a difference if Reference().DockerReference() is nil.
-func (d *Destination) IgnoresEmbeddedDockerReference() bool {
- return d.internal.IgnoresEmbeddedDockerReference()
-}
-
-// HasThreadSafePutBlob indicates whether PutBlob can be executed concurrently.
-func (d *Destination) HasThreadSafePutBlob() bool {
- return d.internal.HasThreadSafePutBlob()
-}
-
-// PutBlob writes contents of stream and returns data representing the result (with all data filled in).
-// inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it.
-// inputInfo.Size is the expected length of stream, if known.
-// May update cache.
-// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
-// to any other readers for download using the supplied digest.
-// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
-func (d *Destination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) {
- return d.internal.PutBlob(ctx, stream, inputInfo, cache, isConfig)
-}
-
-// TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
-// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
-// info.Digest must not be empty.
-// If canSubstitute, TryReusingBlob can use an equivalent equivalent of the desired blob; in that case the returned info may not match the input.
-// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size, and may
-// include CompressionOperation and CompressionAlgorithm fields to indicate that a change to the compression type should be
-// reflected in the manifest that will be written.
-// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
-// May use and/or update cache.
-func (d *Destination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) {
- return d.internal.TryReusingBlob(ctx, info, cache, canSubstitute)
-}
-
-// PutManifest writes manifest to the destination.
-// The instanceDigest value is expected to always be nil, because this transport does not support manifest lists, so
-// there can be no secondary manifests.
-// FIXME? This should also receive a MIME type if known, to differentiate between schema versions.
-// If the destination is in principle available, refuses this manifest type (e.g. it does not recognize the schema),
-// but may accept a different manifest type, the returned error must be an ManifestTypeRejectedError.
-func (d *Destination) PutManifest(ctx context.Context, m []byte, instanceDigest *digest.Digest) error {
- return d.internal.PutManifest(ctx, m, instanceDigest)
-}
-
-// PutSignatures would add the given signatures to the docker tarfile (currently not supported).
-// The instanceDigest value is expected to always be nil, because this transport does not support manifest lists, so
-// there can be no secondary manifests. MUST be called after PutManifest (signatures reference manifest contents).
-func (d *Destination) PutSignatures(ctx context.Context, signatures [][]byte, instanceDigest *digest.Digest) error {
- return d.internal.PutSignatures(ctx, signatures, instanceDigest)
-}
-
-// Commit finishes writing data to the underlying io.Writer.
-// It is the caller's responsibility to close it, if necessary.
-func (d *Destination) Commit(ctx context.Context) error {
- return d.archive.Close()
-}
diff --git a/vendor/github.com/containers/image/v5/docker/tarfile/doc.go b/vendor/github.com/containers/image/v5/docker/tarfile/doc.go
deleted file mode 100644
index 4ea5369c0..000000000
--- a/vendor/github.com/containers/image/v5/docker/tarfile/doc.go
+++ /dev/null
@@ -1,3 +0,0 @@
-// Package tarfile is an internal implementation detail of some transports.
-// Do not use outside of the github.com/containers/image repo!
-package tarfile
diff --git a/vendor/github.com/containers/image/v5/docker/tarfile/src.go b/vendor/github.com/containers/image/v5/docker/tarfile/src.go
deleted file mode 100644
index ee341eb39..000000000
--- a/vendor/github.com/containers/image/v5/docker/tarfile/src.go
+++ /dev/null
@@ -1,104 +0,0 @@
-package tarfile
-
-import (
- "context"
- "io"
-
- internal "github.com/containers/image/v5/docker/internal/tarfile"
- "github.com/containers/image/v5/types"
- digest "github.com/opencontainers/go-digest"
-)
-
-// Source is a partial implementation of types.ImageSource for reading from tarPath.
-// Most users should use this via implementations of ImageReference from docker/archive or docker/daemon.
-type Source struct {
- internal *internal.Source
-}
-
-// NewSourceFromFile returns a tarfile.Source for the specified path.
-// Deprecated: Please use NewSourceFromFileWithContext which will allows you to configure temp directory
-// for big files through SystemContext.BigFilesTemporaryDir
-func NewSourceFromFile(path string) (*Source, error) {
- return NewSourceFromFileWithContext(nil, path)
-}
-
-// NewSourceFromFileWithContext returns a tarfile.Source for the specified path.
-func NewSourceFromFileWithContext(sys *types.SystemContext, path string) (*Source, error) {
- archive, err := internal.NewReaderFromFile(sys, path)
- if err != nil {
- return nil, err
- }
- src := internal.NewSource(archive, true, nil, -1)
- return &Source{internal: src}, nil
-}
-
-// NewSourceFromStream returns a tarfile.Source for the specified inputStream,
-// which can be either compressed or uncompressed. The caller can close the
-// inputStream immediately after NewSourceFromFile returns.
-// Deprecated: Please use NewSourceFromStreamWithSystemContext which will allows you to configure
-// temp directory for big files through SystemContext.BigFilesTemporaryDir
-func NewSourceFromStream(inputStream io.Reader) (*Source, error) {
- return NewSourceFromStreamWithSystemContext(nil, inputStream)
-}
-
-// NewSourceFromStreamWithSystemContext returns a tarfile.Source for the specified inputStream,
-// which can be either compressed or uncompressed. The caller can close the
-// inputStream immediately after NewSourceFromFile returns.
-func NewSourceFromStreamWithSystemContext(sys *types.SystemContext, inputStream io.Reader) (*Source, error) {
- archive, err := internal.NewReaderFromStream(sys, inputStream)
- if err != nil {
- return nil, err
- }
- src := internal.NewSource(archive, true, nil, -1)
- return &Source{internal: src}, nil
-}
-
-// Close removes resources associated with an initialized Source, if any.
-func (s *Source) Close() error {
- return s.internal.Close()
-}
-
-// LoadTarManifest loads and decodes the manifest.json
-func (s *Source) LoadTarManifest() ([]ManifestItem, error) {
- return s.internal.TarManifest(), nil
-}
-
-// GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available).
-// It may use a remote (= slow) service.
-// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve (when the primary manifest is a manifest list);
-// this never happens if the primary manifest is not a manifest list (e.g. if the source never returns manifest lists).
-// This source implementation does not support manifest lists, so the passed-in instanceDigest should always be nil,
-// as the primary manifest can not be a list, so there can be no secondary instances.
-func (s *Source) GetManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, string, error) {
- return s.internal.GetManifest(ctx, instanceDigest)
-}
-
-// HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently.
-func (s *Source) HasThreadSafeGetBlob() bool {
- return s.internal.HasThreadSafeGetBlob()
-}
-
-// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown).
-// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
-// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location.
-func (s *Source) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) {
- return s.internal.GetBlob(ctx, info, cache)
-}
-
-// GetSignatures returns the image's signatures. It may use a remote (= slow) service.
-// This source implementation does not support manifest lists, so the passed-in instanceDigest should always be nil,
-// as there can be no secondary manifests.
-func (s *Source) GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) {
- return s.internal.GetSignatures(ctx, instanceDigest)
-}
-
-// LayerInfosForCopy returns either nil (meaning the values in the manifest are fine), or updated values for the layer
-// blobsums that are listed in the image's manifest. If values are returned, they should be used when using GetBlob()
-// to read the image's layers.
-// This source implementation does not support manifest lists, so the passed-in instanceDigest should always be nil,
-// as the primary manifest can not be a list, so there can be no secondary manifests.
-// The Digest field is guaranteed to be provided; Size may be -1.
-// WARNING: The list may contain duplicates, and they are semantically relevant.
-func (s *Source) LayerInfosForCopy(ctx context.Context, instanceDigest *digest.Digest) ([]types.BlobInfo, error) {
- return s.internal.LayerInfosForCopy(ctx, instanceDigest)
-}
diff --git a/vendor/github.com/containers/image/v5/docker/tarfile/types.go b/vendor/github.com/containers/image/v5/docker/tarfile/types.go
deleted file mode 100644
index 0f14389e6..000000000
--- a/vendor/github.com/containers/image/v5/docker/tarfile/types.go
+++ /dev/null
@@ -1,8 +0,0 @@
-package tarfile
-
-import (
- internal "github.com/containers/image/v5/docker/internal/tarfile"
-)
-
-// ManifestItem is an element of the array stored in the top-level manifest.json file.
-type ManifestItem = internal.ManifestItem // All public members from the internal package remain accessible.
diff --git a/vendor/github.com/disiqueira/gotree/v3/.gitignore b/vendor/github.com/disiqueira/gotree/v3/.gitignore
new file mode 100644
index 000000000..3236c30ab
--- /dev/null
+++ b/vendor/github.com/disiqueira/gotree/v3/.gitignore
@@ -0,0 +1,137 @@
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
+*.test
+*.prof
+
+.idea/
+GoTree.iml
+### Linux template
+*~
+
+# temporary files which can be created if a process still has a handle open of a deleted file
+.fuse_hidden*
+
+# KDE directory preferences
+.directory
+
+# Linux trash folder which might appear on any partition or disk
+.Trash-*
+### Windows template
+# Windows image file caches
+Thumbs.db
+ehthumbs.db
+
+# Folder config file
+Desktop.ini
+
+# Recycle Bin used on file shares
+$RECYCLE.BIN/
+
+# Windows Installer files
+*.cab
+*.msi
+*.msm
+*.msp
+
+# Windows shortcuts
+*.lnk
+### JetBrains template
+# Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio and Webstorm
+# Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839
+
+# User-specific stuff:
+.idea/workspace.xml
+.idea/tasks.xml
+.idea/dictionaries
+.idea/vcs.xml
+.idea/jsLibraryMappings.xml
+
+# Sensitive or high-churn files:
+.idea/dataSources.ids
+.idea/dataSources.xml
+.idea/dataSources.local.xml
+.idea/sqlDataSources.xml
+.idea/dynamic.xml
+.idea/uiDesigner.xml
+
+# Gradle:
+.idea/gradle.xml
+.idea/libraries
+
+# Mongo Explorer plugin:
+.idea/mongoSettings.xml
+
+## File-based project format:
+*.iws
+
+## Plugin-specific files:
+
+# IntelliJ
+/out/
+
+# mpeltonen/sbt-idea plugin
+.idea_modules/
+
+# JIRA plugin
+atlassian-ide-plugin.xml
+
+# Crashlytics plugin (for Android Studio and IntelliJ)
+com_crashlytics_export_strings.xml
+crashlytics.properties
+crashlytics-build.properties
+fabric.properties
+### Go template
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+
+# Folders
+
+# Architecture specific extensions/prefixes
+
+
+
+### OSX template
+*.DS_Store
+.AppleDouble
+.LSOverride
+
+# Icon must end with two \r
+Icon
+
+# Thumbnails
+._*
+
+# Files that might appear in the root of a volume
+.DocumentRevisions-V100
+.fseventsd
+.Spotlight-V100
+.TemporaryItems
+.Trashes
+.VolumeIcon.icns
+.com.apple.timemachine.donotpresent
+
+# Directories potentially created on remote AFP share
+.AppleDB
+.AppleDesktop
+Network Trash Folder
+Temporary Items
+.apdisk
diff --git a/vendor/github.com/disiqueira/gotree/v3/.travis.yml b/vendor/github.com/disiqueira/gotree/v3/.travis.yml
new file mode 100644
index 000000000..29261dfff
--- /dev/null
+++ b/vendor/github.com/disiqueira/gotree/v3/.travis.yml
@@ -0,0 +1,11 @@
+language: go
+go_import_path: github.com/disiqueira/gotree
+git:
+ depth: 1
+env:
+ - GO111MODULE=on
+ - GO111MODULE=off
+go: [ 1.11.x, 1.12.x, 1.13.x ]
+os: [ linux, osx ]
+script:
+ - go test -race -v ./...
diff --git a/vendor/github.com/disiqueira/gotree/v3/LICENSE b/vendor/github.com/disiqueira/gotree/v3/LICENSE
new file mode 100644
index 000000000..e790b5a52
--- /dev/null
+++ b/vendor/github.com/disiqueira/gotree/v3/LICENSE
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2017 Diego Siqueira
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/disiqueira/gotree/v3/README.md b/vendor/github.com/disiqueira/gotree/v3/README.md
new file mode 100644
index 000000000..d09d4a98c
--- /dev/null
+++ b/vendor/github.com/disiqueira/gotree/v3/README.md
@@ -0,0 +1,104 @@
+# ![GoTree](https://rawgit.com/DiSiqueira/GoTree/master/gotree-logo.png)
+
+# GoTree ![Language Badge](https://img.shields.io/badge/Language-Go-blue.svg) ![Go Report](https://goreportcard.com/badge/github.com/DiSiqueira/GoTree) ![License Badge](https://img.shields.io/badge/License-MIT-blue.svg) ![Status Badge](https://img.shields.io/badge/Status-Beta-brightgreen.svg) [![GoDoc](https://godoc.org/github.com/DiSiqueira/GoTree?status.svg)](https://godoc.org/github.com/DiSiqueira/GoTree) [![Build Status](https://travis-ci.org/DiSiqueira/GoTree.svg?branch=master)](https://travis-ci.org/DiSiqueira/GoTree)
+
+Simple Go module to print tree structures in terminal. Heavily inspired by [The Tree Command for Linux][treecommand]
+
+The GoTree's goal is to be a simple tool providing a stupidly easy-to-use and fast way to print recursive structures.
+
+[treecommand]: http://mama.indstate.edu/users/ice/tree/
+
+## Project Status
+
+GoTree is on beta. Pull Requests [are welcome](https://github.com/DiSiqueira/GoTree#social-coding)
+
+![](http://image.prntscr.com/image/2a0dbf0777454446b8083fb6a0dc51fe.png)
+
+## Features
+
+- Very simple and fast code
+- Intuitive names
+- Easy to extend
+- Uses only native libs
+- STUPIDLY [EASY TO USE](https://github.com/DiSiqueira/GoTree#usage)
+
+## Installation
+
+### Go Get
+
+```bash
+$ go get github.com/disiqueira/gotree
+```
+
+## Usage
+
+### Simple create, populate and print example
+
+![](http://image.prntscr.com/image/dd2fe3737e6543f7b21941a6953598c2.png)
+
+```golang
+package main
+
+import (
+ "fmt"
+
+ "github.com/disiqueira/gotree"
+)
+
+func main() {
+ artist := gotree.New("Pantera")
+ album := artist.Add("Far Beyond Driven")
+ album.Add("5 minutes Alone")
+
+ fmt.Println(artist.Print())
+}
+```
+
+## Contributing
+
+### Bug Reports & Feature Requests
+
+Please use the [issue tracker](https://github.com/DiSiqueira/GoTree/issues) to report any bugs or file feature requests.
+
+### Developing
+
+PRs are welcome. To begin developing, do this:
+
+```bash
+$ git clone --recursive git@github.com:DiSiqueira/GoTree.git
+$ cd GoTree/
+```
+
+## Social Coding
+
+1. Create an issue to discuss about your idea
+2. [Fork it] (https://github.com/DiSiqueira/GoTree/fork)
+3. Create your feature branch (`git checkout -b my-new-feature`)
+4. Commit your changes (`git commit -am 'Add some feature'`)
+5. Push to the branch (`git push origin my-new-feature`)
+6. Create a new Pull Request
+7. Profit! :white_check_mark:
+
+## License
+
+The MIT License (MIT)
+
+Copyright (c) 2013-2018 Diego Siqueira
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/vendor/github.com/disiqueira/gotree/v3/_config.yml b/vendor/github.com/disiqueira/gotree/v3/_config.yml
new file mode 100644
index 000000000..c74188174
--- /dev/null
+++ b/vendor/github.com/disiqueira/gotree/v3/_config.yml
@@ -0,0 +1 @@
+theme: jekyll-theme-slate \ No newline at end of file
diff --git a/vendor/github.com/disiqueira/gotree/v3/go.mod b/vendor/github.com/disiqueira/gotree/v3/go.mod
new file mode 100644
index 000000000..7e17c637e
--- /dev/null
+++ b/vendor/github.com/disiqueira/gotree/v3/go.mod
@@ -0,0 +1,3 @@
+module github.com/disiqueira/gotree/v3
+
+go 1.13
diff --git a/vendor/github.com/disiqueira/gotree/v3/gotree-logo.png b/vendor/github.com/disiqueira/gotree/v3/gotree-logo.png
new file mode 100644
index 000000000..1735c6008
--- /dev/null
+++ b/vendor/github.com/disiqueira/gotree/v3/gotree-logo.png
Binary files differ
diff --git a/vendor/github.com/disiqueira/gotree/v3/gotree.go b/vendor/github.com/disiqueira/gotree/v3/gotree.go
new file mode 100644
index 000000000..c529f62be
--- /dev/null
+++ b/vendor/github.com/disiqueira/gotree/v3/gotree.go
@@ -0,0 +1,129 @@
+// Package gotree creates and prints trees.
+package gotree
+
+import (
+ "strings"
+)
+
+const (
+ newLine = "\n"
+ emptySpace = " "
+ middleItem = "├── "
+ continueItem = "│ "
+ lastItem = "└── "
+)
+
+type (
+ tree struct {
+ text string
+ items []Tree
+ }
+
+ // Tree is tree interface
+ Tree interface {
+ Add(text string) Tree
+ AddTree(tree Tree)
+ Items() []Tree
+ Text() string
+ Print() string
+ }
+
+ printer struct {
+ }
+
+ // Printer is printer interface
+ Printer interface {
+ Print(Tree) string
+ }
+)
+
+//New returns a new GoTree.Tree
+func New(text string) Tree {
+ return &tree{
+ text: text,
+ items: []Tree{},
+ }
+}
+
+//Add adds a node to the tree
+func (t *tree) Add(text string) Tree {
+ n := New(text)
+ t.items = append(t.items, n)
+ return n
+}
+
+//AddTree adds a tree as an item
+func (t *tree) AddTree(tree Tree) {
+ t.items = append(t.items, tree)
+}
+
+//Text returns the node's value
+func (t *tree) Text() string {
+ return t.text
+}
+
+//Items returns all items in the tree
+func (t *tree) Items() []Tree {
+ return t.items
+}
+
+//Print returns a visual representation of the tree
+func (t *tree) Print() string {
+ return newPrinter().Print(t)
+}
+
+func newPrinter() Printer {
+ return &printer{}
+}
+
+//Print prints a tree to a string
+func (p *printer) Print(t Tree) string {
+ return t.Text() + newLine + p.printItems(t.Items(), []bool{})
+}
+
+func (p *printer) printText(text string, spaces []bool, last bool) string {
+ var result string
+ for _, space := range spaces {
+ if space {
+ result += emptySpace
+ } else {
+ result += continueItem
+ }
+ }
+
+ indicator := middleItem
+ if last {
+ indicator = lastItem
+ }
+
+ var out string
+ lines := strings.Split(text, "\n")
+ for i := range lines {
+ text := lines[i]
+ if i == 0 {
+ out += result + indicator + text + newLine
+ continue
+ }
+ if last {
+ indicator = emptySpace
+ } else {
+ indicator = continueItem
+ }
+ out += result + indicator + text + newLine
+ }
+
+ return out
+}
+
+func (p *printer) printItems(t []Tree, spaces []bool) string {
+ var result string
+ for i, f := range t {
+ last := i == len(t)-1
+ result += p.printText(f.Text(), spaces, last)
+ if len(f.Items()) > 0 {
+ spacesChild := append(spaces, last)
+ result += p.printItems(f.Items(), spacesChild)
+ }
+ }
+ return result
+}
diff --git a/vendor/github.com/ishidawataru/sctp/.travis.yml b/vendor/github.com/ishidawataru/sctp/.travis.yml
index 01a76be9a..a1c693c01 100644
--- a/vendor/github.com/ishidawataru/sctp/.travis.yml
+++ b/vendor/github.com/ishidawataru/sctp/.travis.yml
@@ -1,10 +1,20 @@
language: go
+arch:
+ - amd64
+ - ppc64le
go:
- 1.9.x
- 1.10.x
- 1.11.x
- 1.12.x
- 1.13.x
+# allowing test cases to fail for the versions that were not supported by ppc64le
+matrix:
+ allow_failures:
+ - go: 1.9.x
+ - go: 1.10.x
+ - go: 1.13.x
+
script:
- go test -v -race ./...
@@ -12,6 +22,7 @@ script:
- GOOS=linux GOARCH=arm go build .
- GOOS=linux GOARCH=arm64 go build .
- GOOS=linux GOARCH=ppc64le go build .
+ - GOOS=linux GOARCH=mips64le go build .
- (go version | grep go1.6 > /dev/null) || GOOS=linux GOARCH=s390x go build .
# can be compiled but not functional:
- GOOS=linux GOARCH=386 go build .
diff --git a/vendor/github.com/ishidawataru/sctp/sctp_linux.go b/vendor/github.com/ishidawataru/sctp/sctp_linux.go
index ac340ddfb..d96d09e5c 100644
--- a/vendor/github.com/ishidawataru/sctp/sctp_linux.go
+++ b/vendor/github.com/ishidawataru/sctp/sctp_linux.go
@@ -212,7 +212,7 @@ func listenSCTPExtConfig(network string, laddr *SCTPAddr, options InitMsg, contr
laddr.IPAddrs = append(laddr.IPAddrs, net.IPAddr{IP: net.IPv6zero})
}
}
- err := SCTPBind(sock, laddr, SCTP_BINDX_ADD_ADDR)
+ err = SCTPBind(sock, laddr, SCTP_BINDX_ADD_ADDR)
if err != nil {
return nil, err
}
diff --git a/vendor/github.com/jinzhu/copier/License b/vendor/github.com/jinzhu/copier/License
new file mode 100644
index 000000000..e2dc5381e
--- /dev/null
+++ b/vendor/github.com/jinzhu/copier/License
@@ -0,0 +1,20 @@
+The MIT License (MIT)
+
+Copyright (c) 2015 Jinzhu
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+the Software, and to permit persons to whom the Software is furnished to do so,
+subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/jinzhu/copier/README.md b/vendor/github.com/jinzhu/copier/README.md
new file mode 100644
index 000000000..cff72405c
--- /dev/null
+++ b/vendor/github.com/jinzhu/copier/README.md
@@ -0,0 +1,131 @@
+# Copier
+
+ I am a copier, I copy everything from one to another
+
+[![test status](https://github.com/jinzhu/copier/workflows/tests/badge.svg?branch=master "test status")](https://github.com/jinzhu/copier/actions)
+
+## Features
+
+* Copy from field to field with same name
+* Copy from method to field with same name
+* Copy from field to method with same name
+* Copy from slice to slice
+* Copy from struct to slice
+* Copy from map to map
+* Enforce copying a field with a tag
+* Ignore a field with a tag
+* Deep Copy
+
+## Usage
+
+```go
+package main
+
+import (
+ "fmt"
+ "github.com/jinzhu/copier"
+)
+
+type User struct {
+ Name string
+ Role string
+ Age int32
+
+ // Explicitly ignored in the destination struct.
+ Salary int
+}
+
+func (user *User) DoubleAge() int32 {
+ return 2 * user.Age
+}
+
+// Tags in the destination Struct provide instructions to copier.Copy to ignore
+// or enforce copying and to panic or return an error if a field was not copied.
+type Employee struct {
+ // Tell copier.Copy to panic if this field is not copied.
+ Name string `copier:"must"`
+
+ // Tell copier.Copy to return an error if this field is not copied.
+ Age int32 `copier:"must,nopanic"`
+
+ // Tell copier.Copy to explicitly ignore copying this field.
+ Salary int `copier:"-"`
+
+ DoubleAge int32
+ EmployeId int64
+ SuperRole string
+}
+
+func (employee *Employee) Role(role string) {
+ employee.SuperRole = "Super " + role
+}
+
+func main() {
+ var (
+ user = User{Name: "Jinzhu", Age: 18, Role: "Admin", Salary: 200000}
+ users = []User{{Name: "Jinzhu", Age: 18, Role: "Admin", Salary: 100000}, {Name: "jinzhu 2", Age: 30, Role: "Dev", Salary: 60000}}
+ employee = Employee{Salary: 150000}
+ employees = []Employee{}
+ )
+
+ copier.Copy(&employee, &user)
+
+ fmt.Printf("%#v \n", employee)
+ // Employee{
+ // Name: "Jinzhu", // Copy from field
+ // Age: 18, // Copy from field
+ // Salary:150000, // Copying explicitly ignored
+ // DoubleAge: 36, // Copy from method
+ // EmployeeId: 0, // Ignored
+ // SuperRole: "Super Admin", // Copy to method
+ // }
+
+ // Copy struct to slice
+ copier.Copy(&employees, &user)
+
+ fmt.Printf("%#v \n", employees)
+ // []Employee{
+ // {Name: "Jinzhu", Age: 18, Salary:0, DoubleAge: 36, EmployeId: 0, SuperRole: "Super Admin"}
+ // }
+
+ // Copy slice to slice
+ employees = []Employee{}
+ copier.Copy(&employees, &users)
+
+ fmt.Printf("%#v \n", employees)
+ // []Employee{
+ // {Name: "Jinzhu", Age: 18, Salary:0, DoubleAge: 36, EmployeId: 0, SuperRole: "Super Admin"},
+ // {Name: "jinzhu 2", Age: 30, Salary:0, DoubleAge: 60, EmployeId: 0, SuperRole: "Super Dev"},
+ // }
+
+ // Copy map to map
+ map1 := map[int]int{3: 6, 4: 8}
+ map2 := map[int32]int8{}
+ copier.Copy(&map2, map1)
+
+ fmt.Printf("%#v \n", map2)
+ // map[int32]int8{3:6, 4:8}
+}
+```
+
+### Copy with Option
+
+```go
+copier.CopyWithOption(&to, &from, copier.Option{IgnoreEmpty: true, DeepCopy: true})
+```
+
+## Contributing
+
+You can help to make the project better, check out [http://gorm.io/contribute.html](http://gorm.io/contribute.html) for things you can do.
+
+# Author
+
+**jinzhu**
+
+* <http://github.com/jinzhu>
+* <wosmvp@gmail.com>
+* <http://twitter.com/zhangjinzhu>
+
+## License
+
+Released under the [MIT License](https://github.com/jinzhu/copier/blob/master/License).
diff --git a/vendor/github.com/jinzhu/copier/copier.go b/vendor/github.com/jinzhu/copier/copier.go
new file mode 100644
index 000000000..72bf65c78
--- /dev/null
+++ b/vendor/github.com/jinzhu/copier/copier.go
@@ -0,0 +1,491 @@
+package copier
+
+import (
+ "database/sql"
+ "database/sql/driver"
+ "fmt"
+ "reflect"
+ "strings"
+)
+
+// These flags define options for tag handling
+const (
+ // Denotes that a destination field must be copied to. If copying fails then a panic will ensue.
+ tagMust uint8 = 1 << iota
+
+ // Denotes that the program should not panic when the must flag is on and
+ // value is not copied. The program will return an error instead.
+ tagNoPanic
+
+ // Ignore a destination field from being copied to.
+ tagIgnore
+
+ // Denotes that the value has been copied
+ hasCopied
+)
+
+// Option sets copy options
+type Option struct {
+ // setting this value to true will ignore copying zero values of all the fields, including bools, as well as a
+ // struct having all its fields set to their zero values respectively (see IsZero() in reflect/value.go)
+ IgnoreEmpty bool
+ DeepCopy bool
+}
+
+// Copy copies things
+func Copy(toValue interface{}, fromValue interface{}) (err error) {
+ return copier(toValue, fromValue, Option{})
+}
+
+// CopyWithOption copy with option
+func CopyWithOption(toValue interface{}, fromValue interface{}, opt Option) (err error) {
+ return copier(toValue, fromValue, opt)
+}
+
+func copier(toValue interface{}, fromValue interface{}, opt Option) (err error) {
+ var (
+ isSlice bool
+ amount = 1
+ from = indirect(reflect.ValueOf(fromValue))
+ to = indirect(reflect.ValueOf(toValue))
+ )
+
+ if !to.CanAddr() {
+ return ErrInvalidCopyDestination
+ }
+
+ // Return if from value is invalid
+ if !from.IsValid() {
+ return ErrInvalidCopyFrom
+ }
+
+ fromType, isPtrFrom := indirectType(from.Type())
+ toType, _ := indirectType(to.Type())
+
+ if fromType.Kind() == reflect.Interface {
+ fromType = reflect.TypeOf(from.Interface())
+ }
+
+ if toType.Kind() == reflect.Interface {
+ toType, _ = indirectType(reflect.TypeOf(to.Interface()))
+ oldTo := to
+ to = reflect.New(reflect.TypeOf(to.Interface())).Elem()
+ defer func() {
+ oldTo.Set(to)
+ }()
+ }
+
+ // Just set it if possible to assign for normal types
+ if from.Kind() != reflect.Slice && from.Kind() != reflect.Struct && from.Kind() != reflect.Map && (from.Type().AssignableTo(to.Type()) || from.Type().ConvertibleTo(to.Type())) {
+ if !isPtrFrom || !opt.DeepCopy {
+ to.Set(from.Convert(to.Type()))
+ } else {
+ fromCopy := reflect.New(from.Type())
+ fromCopy.Set(from.Elem())
+ to.Set(fromCopy.Convert(to.Type()))
+ }
+ return
+ }
+
+ if from.Kind() != reflect.Slice && fromType.Kind() == reflect.Map && toType.Kind() == reflect.Map {
+ if !fromType.Key().ConvertibleTo(toType.Key()) {
+ return ErrMapKeyNotMatch
+ }
+
+ if to.IsNil() {
+ to.Set(reflect.MakeMapWithSize(toType, from.Len()))
+ }
+
+ for _, k := range from.MapKeys() {
+ toKey := indirect(reflect.New(toType.Key()))
+ if !set(toKey, k, opt.DeepCopy) {
+ return fmt.Errorf("%w map, old key: %v, new key: %v", ErrNotSupported, k.Type(), toType.Key())
+ }
+
+ elemType, _ := indirectType(toType.Elem())
+ toValue := indirect(reflect.New(elemType))
+ if !set(toValue, from.MapIndex(k), opt.DeepCopy) {
+ if err = copier(toValue.Addr().Interface(), from.MapIndex(k).Interface(), opt); err != nil {
+ return err
+ }
+ }
+
+ for {
+ if elemType == toType.Elem() {
+ to.SetMapIndex(toKey, toValue)
+ break
+ }
+ elemType = reflect.PtrTo(elemType)
+ toValue = toValue.Addr()
+ }
+ }
+ return
+ }
+
+ if from.Kind() == reflect.Slice && to.Kind() == reflect.Slice && fromType.ConvertibleTo(toType) {
+ if to.IsNil() {
+ slice := reflect.MakeSlice(reflect.SliceOf(to.Type().Elem()), from.Len(), from.Cap())
+ to.Set(slice)
+ }
+
+ for i := 0; i < from.Len(); i++ {
+ if to.Len() < i+1 {
+ to.Set(reflect.Append(to, reflect.New(to.Type().Elem()).Elem()))
+ }
+
+ if !set(to.Index(i), from.Index(i), opt.DeepCopy) {
+ err = CopyWithOption(to.Index(i).Addr().Interface(), from.Index(i).Interface(), opt)
+ if err != nil {
+ continue
+ }
+ }
+ }
+ return
+ }
+
+ if fromType.Kind() != reflect.Struct || toType.Kind() != reflect.Struct {
+ // skip not supported type
+ return
+ }
+
+ if to.Kind() == reflect.Slice {
+ isSlice = true
+ if from.Kind() == reflect.Slice {
+ amount = from.Len()
+ }
+ }
+
+ for i := 0; i < amount; i++ {
+ var dest, source reflect.Value
+
+ if isSlice {
+ // source
+ if from.Kind() == reflect.Slice {
+ source = indirect(from.Index(i))
+ } else {
+ source = indirect(from)
+ }
+ // dest
+ dest = indirect(reflect.New(toType).Elem())
+ } else {
+ source = indirect(from)
+ dest = indirect(to)
+ }
+
+ destKind := dest.Kind()
+ initDest := false
+ if destKind == reflect.Interface {
+ initDest = true
+ dest = indirect(reflect.New(toType))
+ }
+
+ // Get tag options
+ tagBitFlags := map[string]uint8{}
+ if dest.IsValid() {
+ tagBitFlags = getBitFlags(toType)
+ }
+
+ // check source
+ if source.IsValid() {
+ // Copy from source field to dest field or method
+ fromTypeFields := deepFields(fromType)
+ for _, field := range fromTypeFields {
+ name := field.Name
+
+ // Get bit flags for field
+ fieldFlags, _ := tagBitFlags[name]
+
+ // Check if we should ignore copying
+ if (fieldFlags & tagIgnore) != 0 {
+ continue
+ }
+
+ if fromField := source.FieldByName(name); fromField.IsValid() && !shouldIgnore(fromField, opt.IgnoreEmpty) {
+ // process for nested anonymous field
+ destFieldNotSet := false
+ if f, ok := dest.Type().FieldByName(name); ok {
+ for idx := range f.Index {
+ destField := dest.FieldByIndex(f.Index[:idx+1])
+
+ if destField.Kind() != reflect.Ptr {
+ continue
+ }
+
+ if !destField.IsNil() {
+ continue
+ }
+ if !destField.CanSet() {
+ destFieldNotSet = true
+ break
+ }
+
+ // destField is a nil pointer that can be set
+ newValue := reflect.New(destField.Type().Elem())
+ destField.Set(newValue)
+ }
+ }
+
+ if destFieldNotSet {
+ break
+ }
+
+ toField := dest.FieldByName(name)
+ if toField.IsValid() {
+ if toField.CanSet() {
+ if !set(toField, fromField, opt.DeepCopy) {
+ if err := copier(toField.Addr().Interface(), fromField.Interface(), opt); err != nil {
+ return err
+ }
+ }
+ if fieldFlags != 0 {
+ // Note that a copy was made
+ tagBitFlags[name] = fieldFlags | hasCopied
+ }
+ }
+ } else {
+ // try to set to method
+ var toMethod reflect.Value
+ if dest.CanAddr() {
+ toMethod = dest.Addr().MethodByName(name)
+ } else {
+ toMethod = dest.MethodByName(name)
+ }
+
+ if toMethod.IsValid() && toMethod.Type().NumIn() == 1 && fromField.Type().AssignableTo(toMethod.Type().In(0)) {
+ toMethod.Call([]reflect.Value{fromField})
+ }
+ }
+ }
+ }
+
+ // Copy from from method to dest field
+ for _, field := range deepFields(toType) {
+ name := field.Name
+
+ var fromMethod reflect.Value
+ if source.CanAddr() {
+ fromMethod = source.Addr().MethodByName(name)
+ } else {
+ fromMethod = source.MethodByName(name)
+ }
+
+ if fromMethod.IsValid() && fromMethod.Type().NumIn() == 0 && fromMethod.Type().NumOut() == 1 && !shouldIgnore(fromMethod, opt.IgnoreEmpty) {
+ if toField := dest.FieldByName(name); toField.IsValid() && toField.CanSet() {
+ values := fromMethod.Call([]reflect.Value{})
+ if len(values) >= 1 {
+ set(toField, values[0], opt.DeepCopy)
+ }
+ }
+ }
+ }
+ }
+
+ if isSlice {
+ if dest.Addr().Type().AssignableTo(to.Type().Elem()) {
+ if to.Len() < i+1 {
+ to.Set(reflect.Append(to, dest.Addr()))
+ } else {
+ set(to.Index(i), dest.Addr(), opt.DeepCopy)
+ }
+ } else if dest.Type().AssignableTo(to.Type().Elem()) {
+ if to.Len() < i+1 {
+ to.Set(reflect.Append(to, dest))
+ } else {
+ set(to.Index(i), dest, opt.DeepCopy)
+ }
+ }
+ } else if initDest {
+ to.Set(dest)
+ }
+
+ err = checkBitFlags(tagBitFlags)
+ }
+
+ return
+}
+
+func shouldIgnore(v reflect.Value, ignoreEmpty bool) bool {
+ if !ignoreEmpty {
+ return false
+ }
+
+ return v.IsZero()
+}
+
+func deepFields(reflectType reflect.Type) []reflect.StructField {
+ if reflectType, _ = indirectType(reflectType); reflectType.Kind() == reflect.Struct {
+ fields := make([]reflect.StructField, 0, reflectType.NumField())
+
+ for i := 0; i < reflectType.NumField(); i++ {
+ v := reflectType.Field(i)
+ if v.Anonymous {
+ fields = append(fields, deepFields(v.Type)...)
+ } else {
+ fields = append(fields, v)
+ }
+ }
+
+ return fields
+ }
+
+ return nil
+}
+
+func indirect(reflectValue reflect.Value) reflect.Value {
+ for reflectValue.Kind() == reflect.Ptr {
+ reflectValue = reflectValue.Elem()
+ }
+ return reflectValue
+}
+
+func indirectType(reflectType reflect.Type) (_ reflect.Type, isPtr bool) {
+ for reflectType.Kind() == reflect.Ptr || reflectType.Kind() == reflect.Slice {
+ reflectType = reflectType.Elem()
+ isPtr = true
+ }
+ return reflectType, isPtr
+}
+
+func set(to, from reflect.Value, deepCopy bool) bool {
+ if from.IsValid() {
+ if to.Kind() == reflect.Ptr {
+ // set `to` to nil if from is nil
+ if from.Kind() == reflect.Ptr && from.IsNil() {
+ to.Set(reflect.Zero(to.Type()))
+ return true
+ } else if to.IsNil() {
+ // `from` -> `to`
+ // sql.NullString -> *string
+ if fromValuer, ok := driverValuer(from); ok {
+ v, err := fromValuer.Value()
+ if err != nil {
+ return false
+ }
+ // if `from` is not valid do nothing with `to`
+ if v == nil {
+ return true
+ }
+ }
+ // allocate new `to` variable with default value (eg. *string -> new(string))
+ to.Set(reflect.New(to.Type().Elem()))
+ }
+ // depointer `to`
+ to = to.Elem()
+ }
+
+ if deepCopy {
+ toKind := to.Kind()
+ if toKind == reflect.Interface && to.IsNil() {
+ if reflect.TypeOf(from.Interface()) != nil {
+ to.Set(reflect.New(reflect.TypeOf(from.Interface())).Elem())
+ toKind = reflect.TypeOf(to.Interface()).Kind()
+ }
+ }
+ if toKind == reflect.Struct || toKind == reflect.Map || toKind == reflect.Slice {
+ return false
+ }
+ }
+
+ if from.Type().ConvertibleTo(to.Type()) {
+ to.Set(from.Convert(to.Type()))
+ } else if toScanner, ok := to.Addr().Interface().(sql.Scanner); ok {
+ // `from` -> `to`
+ // *string -> sql.NullString
+ if from.Kind() == reflect.Ptr {
+ // if `from` is nil do nothing with `to`
+ if from.IsNil() {
+ return true
+ }
+ // depointer `from`
+ from = indirect(from)
+ }
+ // `from` -> `to`
+ // string -> sql.NullString
+ // set `to` by invoking method Scan(`from`)
+ err := toScanner.Scan(from.Interface())
+ if err != nil {
+ return false
+ }
+ } else if fromValuer, ok := driverValuer(from); ok {
+ // `from` -> `to`
+ // sql.NullString -> string
+ v, err := fromValuer.Value()
+ if err != nil {
+ return false
+ }
+ // if `from` is not valid do nothing with `to`
+ if v == nil {
+ return true
+ }
+ rv := reflect.ValueOf(v)
+ if rv.Type().AssignableTo(to.Type()) {
+ to.Set(rv)
+ }
+ } else if from.Kind() == reflect.Ptr {
+ return set(to, from.Elem(), deepCopy)
+ } else {
+ return false
+ }
+ }
+
+ return true
+}
+
+// parseTags Parses struct tags and returns uint8 bit flags.
+func parseTags(tag string) (flags uint8) {
+ for _, t := range strings.Split(tag, ",") {
+ switch t {
+ case "-":
+ flags = tagIgnore
+ return
+ case "must":
+ flags = flags | tagMust
+ case "nopanic":
+ flags = flags | tagNoPanic
+ }
+ }
+ return
+}
+
+// getBitFlags Parses struct tags for bit flags.
+func getBitFlags(toType reflect.Type) map[string]uint8 {
+ flags := map[string]uint8{}
+ toTypeFields := deepFields(toType)
+
+ // Get a list of dest field tags
+ for _, field := range toTypeFields {
+ tags := field.Tag.Get("copier")
+ if tags != "" {
+ flags[field.Name] = parseTags(tags)
+ }
+ }
+ return flags
+}
+
+// checkBitFlags Checks flags for error or panic conditions.
+func checkBitFlags(flagsList map[string]uint8) (err error) {
+ // Check flag conditions were met
+ for name, flags := range flagsList {
+ if flags&hasCopied == 0 {
+ switch {
+ case flags&tagMust != 0 && flags&tagNoPanic != 0:
+ err = fmt.Errorf("field %s has must tag but was not copied", name)
+ return
+ case flags&(tagMust) != 0:
+ panic(fmt.Sprintf("Field %s has must tag but was not copied", name))
+ }
+ }
+ }
+ return
+}
+
+func driverValuer(v reflect.Value) (i driver.Valuer, ok bool) {
+
+ if !v.CanAddr() {
+ i, ok = v.Interface().(driver.Valuer)
+ return
+ }
+
+ i, ok = v.Addr().Interface().(driver.Valuer)
+ return
+}
diff --git a/vendor/github.com/jinzhu/copier/errors.go b/vendor/github.com/jinzhu/copier/errors.go
new file mode 100644
index 000000000..cf7c5e74b
--- /dev/null
+++ b/vendor/github.com/jinzhu/copier/errors.go
@@ -0,0 +1,10 @@
+package copier
+
+import "errors"
+
+var (
+ ErrInvalidCopyDestination = errors.New("copy destination is invalid")
+ ErrInvalidCopyFrom = errors.New("copy from is invalid")
+ ErrMapKeyNotMatch = errors.New("map's key type doesn't match")
+ ErrNotSupported = errors.New("not supported")
+)
diff --git a/vendor/github.com/jinzhu/copier/go.mod b/vendor/github.com/jinzhu/copier/go.mod
new file mode 100644
index 000000000..531422dcb
--- /dev/null
+++ b/vendor/github.com/jinzhu/copier/go.mod
@@ -0,0 +1,3 @@
+module github.com/jinzhu/copier
+
+go 1.15
diff --git a/vendor/github.com/openshift/imagebuilder/README.md b/vendor/github.com/openshift/imagebuilder/README.md
index 748bff971..4acfaa2bb 100644
--- a/vendor/github.com/openshift/imagebuilder/README.md
+++ b/vendor/github.com/openshift/imagebuilder/README.md
@@ -102,6 +102,8 @@ Example of usage from OpenShift's experimental `dockerbuild` [command with mount
## Run conformance tests (very slow):
```
+docker rmi busybox; docker pull busybox
+docker rmi centos:7; docker pull centos:7
chmod -R go-w ./dockerclient/testdata
-go test ./dockerclient/conformance_test.go -tags conformance -timeout 30m
+go test ./dockerclient -tags conformance -timeout 30m
```
diff --git a/vendor/github.com/openshift/imagebuilder/builder.go b/vendor/github.com/openshift/imagebuilder/builder.go
index dd8b09c05..df5269904 100644
--- a/vendor/github.com/openshift/imagebuilder/builder.go
+++ b/vendor/github.com/openshift/imagebuilder/builder.go
@@ -37,6 +37,8 @@ type Copy struct {
type Run struct {
Shell bool
Args []string
+ // Mounts are mounts specified through the --mount flag inside the Containerfile
+ Mounts []string
}
type Executor interface {
@@ -67,7 +69,7 @@ func (logExecutor) Copy(excludes []string, copies ...Copy) error {
}
func (logExecutor) Run(run Run, config docker.Config) error {
- log.Printf("RUN %v %t (%v)", run.Args, run.Shell, config.Env)
+ log.Printf("RUN %v %v %t (%v)", run.Args, run.Mounts, run.Shell, config.Env)
return nil
}
diff --git a/vendor/github.com/openshift/imagebuilder/dispatchers.go b/vendor/github.com/openshift/imagebuilder/dispatchers.go
index 2294ae0a7..0d82136e7 100644
--- a/vendor/github.com/openshift/imagebuilder/dispatchers.go
+++ b/vendor/github.com/openshift/imagebuilder/dispatchers.go
@@ -306,7 +306,26 @@ func run(b *Builder, args []string, attributes map[string]bool, flagArgs []strin
args = handleJSONArgs(args, attributes)
- run := Run{Args: args}
+ var mounts []string
+ userArgs := mergeEnv(envMapAsSlice(b.Args), b.Env)
+ for _, a := range flagArgs {
+ arg, err := ProcessWord(a, userArgs)
+ if err != nil {
+ return err
+ }
+ switch {
+ case strings.HasPrefix(arg, "--mount="):
+ mount := strings.TrimPrefix(arg, "--mount=")
+ mounts = append(mounts, mount)
+ default:
+ return fmt.Errorf("RUN only supports the --mount flag")
+ }
+ }
+
+ run := Run{
+ Args: args,
+ Mounts: mounts,
+ }
if !attributes["json"] {
run.Shell = true
diff --git a/vendor/github.com/openshift/imagebuilder/imagebuilder.spec b/vendor/github.com/openshift/imagebuilder/imagebuilder.spec
index 684946ece..79d16ec39 100644
--- a/vendor/github.com/openshift/imagebuilder/imagebuilder.spec
+++ b/vendor/github.com/openshift/imagebuilder/imagebuilder.spec
@@ -12,7 +12,7 @@
#
%global golang_version 1.8.1
-%{!?version: %global version 1.2.0}
+%{!?version: %global version 1.2.2-dev}
%{!?release: %global release 1}
%global package_name imagebuilder
%global product_name Container Image Builder