Diffstat (limited to 'vendor')
-rw-r--r--  vendor/github.com/containers/buildah/.cirrus.yml | 6
-rw-r--r--  vendor/github.com/containers/buildah/CHANGELOG.md | 112
-rw-r--r--  vendor/github.com/containers/buildah/add.go | 635
-rw-r--r--  vendor/github.com/containers/buildah/buildah.go | 2
-rw-r--r--  vendor/github.com/containers/buildah/changelog.txt | 113
-rw-r--r--  vendor/github.com/containers/buildah/commit.go | 12
-rw-r--r--  vendor/github.com/containers/buildah/common.go | 79
-rw-r--r--  vendor/github.com/containers/buildah/copier/copier.go | 1526
-rw-r--r--  vendor/github.com/containers/buildah/copier/syscall_unix.go | 79
-rw-r--r--  vendor/github.com/containers/buildah/copier/syscall_windows.go | 83
-rw-r--r--  vendor/github.com/containers/buildah/copier/unwrap_112.go | 11
-rw-r--r--  vendor/github.com/containers/buildah/copier/unwrap_113.go | 18
-rw-r--r--  vendor/github.com/containers/buildah/copier/xattrs.go | 92
-rw-r--r--  vendor/github.com/containers/buildah/copier/xattrs_unsupported.go | 15
-rw-r--r--  vendor/github.com/containers/buildah/digester.go | 15
-rw-r--r--  vendor/github.com/containers/buildah/go.mod | 10
-rw-r--r--  vendor/github.com/containers/buildah/go.sum | 21
-rw-r--r--  vendor/github.com/containers/buildah/image.go | 65
-rw-r--r--  vendor/github.com/containers/buildah/imagebuildah/build.go | 8
-rw-r--r--  vendor/github.com/containers/buildah/imagebuildah/executor.go | 26
-rw-r--r--  vendor/github.com/containers/buildah/imagebuildah/stage_executor.go | 548
-rw-r--r--  vendor/github.com/containers/buildah/pkg/cli/common.go | 9
-rw-r--r--  vendor/github.com/containers/buildah/pkg/rusage/rusage.go | 48
-rw-r--r--  vendor/github.com/containers/buildah/pkg/rusage/rusage_unix.go | 35
-rw-r--r--  vendor/github.com/containers/buildah/pkg/rusage/rusage_unsupported.go | 18
-rw-r--r--  vendor/github.com/containers/buildah/pull.go | 2
-rw-r--r--  vendor/github.com/containers/buildah/run_linux.go | 15
-rw-r--r--  vendor/github.com/containers/buildah/seccomp.go | 2
-rw-r--r--  vendor/github.com/containers/buildah/selinux.go | 4
-rw-r--r--  vendor/github.com/containers/buildah/selinux_unsupported.go | 4
-rw-r--r--  vendor/github.com/containers/buildah/util.go | 287
-rw-r--r--  vendor/github.com/seccomp/containers-golang/.gitignore | 2
-rw-r--r--  vendor/github.com/seccomp/containers-golang/LICENSE | 190
-rw-r--r--  vendor/github.com/seccomp/containers-golang/Makefile | 32
-rw-r--r--  vendor/github.com/seccomp/containers-golang/README.md | 29
-rw-r--r--  vendor/github.com/seccomp/containers-golang/conversion.go | 32
-rw-r--r--  vendor/github.com/seccomp/containers-golang/go.mod | 16
-rw-r--r--  vendor/github.com/seccomp/containers-golang/go.sum | 66
-rw-r--r--  vendor/github.com/seccomp/containers-golang/seccomp.json | 878
-rw-r--r--  vendor/github.com/seccomp/containers-golang/seccomp_default_linux.go | 744
-rw-r--r--  vendor/github.com/seccomp/containers-golang/seccomp_linux.go | 191
-rw-r--r--  vendor/github.com/seccomp/containers-golang/seccomp_unsupported.go | 45
-rw-r--r--  vendor/github.com/seccomp/containers-golang/types.go | 98
-rw-r--r--  vendor/modules.txt | 6
44 files changed, 2910 insertions, 3319 deletions
diff --git a/vendor/github.com/containers/buildah/.cirrus.yml b/vendor/github.com/containers/buildah/.cirrus.yml
index 8fd652ce1..b105f589e 100644
--- a/vendor/github.com/containers/buildah/.cirrus.yml
+++ b/vendor/github.com/containers/buildah/.cirrus.yml
@@ -6,7 +6,7 @@ env:
#### Global variables used for all tasks
####
# Name of the ultimate destination branch for this CI run, PR or post-merge.
- DEST_BRANCH: "master"
+ DEST_BRANCH: "release-1.16"
GOPATH: "/var/tmp/go"
GOSRC: "${GOPATH}/src/github.com/containers/buildah"
# Overrides default location (/tmp/cirrus) for repo clone
@@ -295,11 +295,11 @@ gce_instance:
build_script: |
set -ex
- mkdir -p /nix
mkdir -p .cache
- mount --bind .cache /nix
+ mv .cache /nix
if [[ -z $(ls -A /nix) ]]; then podman run --rm --privileged -ti -v /:/mnt nixos/nix cp -rfT /nix /mnt/nix; fi
podman run --rm --privileged -ti -v /nix:/nix -v ${PWD}:${PWD} -w ${PWD} nixos/nix nix --print-build-logs --option cores 8 --option max-jobs 8 build --file nix/
+ mv /nix .cache
chown -Rf $(whoami) .cache
binaries_artifacts:
diff --git a/vendor/github.com/containers/buildah/CHANGELOG.md b/vendor/github.com/containers/buildah/CHANGELOG.md
index a3f5f2f11..ecbd0540e 100644
--- a/vendor/github.com/containers/buildah/CHANGELOG.md
+++ b/vendor/github.com/containers/buildah/CHANGELOG.md
@@ -2,6 +2,118 @@
# Changelog
+## v1.16.1 (2020-09-10)
+ copier.Get(): hard link targets shouldn't be relative paths
+
+## v1.16.0 (2020-09-03)
+ fix build on 32bit arches
+ containerImageRef.NewImageSource(): don't always force timestamps
+ Add fuse module warning to image readme
+ Heed our retry delay option values when retrying commit/pull/push
+ Switch to containers/common for seccomp
+ Use --timestamp rather than --omit-timestamp
+ docs: remove outdated notice
+ docs: remove outdated notice
+ build-using-dockerfile: add a hidden --log-rusage flag
+ build(deps): bump github.com/containers/image/v5 from 5.5.1 to 5.5.2
+ Discard ReportWriter if user sets options.Quiet
+ build(deps): bump github.com/containers/common from 0.19.0 to 0.20.3
+ Fix ownership of content copied using COPY --from
+ newTarDigester: zero out timestamps in tar headers
+ Update nix pin with `make nixpkgs`
+ bud.bats: correct .dockerignore integration tests
+ Use pipes for copying
+ run: include stdout in error message
+ run: use the correct error for errors.Wrapf
+ copier: un-export internal types
+ copier: add Mkdir()
+ in_podman: don't get tripped up by $CIRRUS_CHANGE_TITLE
+ docs/buildah-commit.md: tweak some wording, add a --rm example
+ imagebuildah: don’t blank out destination names when COPYing
+ Replace retry functions with common/pkg/retry
+ StageExecutor.historyMatches: compare timestamps using .Equal
+ Update vendor of containers/common
+ Fix errors found in coverity scan
+ Change namespace handling flags to better match podman commands
+ conformance testing: ignore buildah.BuilderIdentityAnnotation labels
+ Vendor in containers/storage v1.23.0
+ Add buildah.IsContainer interface
+ Avoid feeding run_buildah to pipe
+ fix(buildahimage): add xz dependency in buildah image
+ Bump github.com/containers/common from 0.15.2 to 0.18.0
+ Howto for rootless image building from OpenShift
+ Add --omit-timestamp flag to buildah bud
+ Update nix pin with `make nixpkgs`
+ Shutdown storage on failures
+ Handle COPY --from when an argument is used
+ Bump github.com/seccomp/containers-golang from 0.5.0 to 0.6.0
+ Cirrus: Use newly built VM images
+ Bump github.com/opencontainers/runc from 1.0.0-rc91 to 1.0.0-rc92
+ Enhance the .dockerignore man pages
+ conformance: add a test for COPY from subdirectory
+ fix bug in manifest inspect
+ Add documentation for .dockerignore
+ Add BuilderIdentityAnnotation to identify buildah version
+ DOC: Add quay.io/containers/buildah image to README.md
+ Update buildahimages readme
+ fix spelling mistake in "info" command result display
+ Don't bind /etc/host and /etc/resolv.conf if network is not present
+ blobcache: avoid an unnecessary NewImage()
+ Build static binary with `buildGoModule`
+ copier: split StripSetidBits into StripSetuidBit/StripSetgidBit/StripStickyBit
+ tarFilterer: handle multiple archives
+ Fix a race we hit during conformance tests
+ Rework conformance testing
+ Update 02-registries-repositories.md
+ test-unit: invoke cmd/buildah tests with --flags
+ parse: fix a type mismatch in a test
+ Fix compilation of tests/testreport/testreport
+ build.sh: log the version of Go that we're using
+ test-unit: increase the test timeout to 40/45 minutes
+ Add the "copier" package
+ Fix & add notes regarding problematic language in codebase
+ Add dependency on github.com/stretchr/testify/require
+ CompositeDigester: add the ability to filter tar streams
+ BATS tests: make more robust
+ vendor golang.org/x/text@v0.3.3
+ Switch golang 1.12 to golang 1.13
+ imagebuildah: wait for stages that might not have even started yet
+ chroot, run: not fail on bind mounts from /sys
+ chroot: do not use setgroups if it is blocked
+ Set engine env from containers.conf
+ imagebuildah: return the right stage's image as the "final" image
+ Fix a help string
+ Deduplicate environment variables
+ switch containers/libpod to containers/podman
+ Bump github.com/containers/ocicrypt from 1.0.2 to 1.0.3
+ Bump github.com/opencontainers/selinux from 1.5.2 to 1.6.0
+ Mask out /sys/dev to prevent information leak
+ linux: skip errors from the runtime kill
+ Mask over the /sys/fs/selinux in mask branch
+ Add VFS additional image store to container
+ tests: add auth tests
+ Allow "readonly" as alias to "ro" in mount options
+ Ignore OS X specific consistency mount option
+ Bump github.com/onsi/ginkgo from 1.13.0 to 1.14.0
+ Bump github.com/containers/common from 0.14.0 to 0.15.2
+ Rootless Buildah should default to IsolationOCIRootless
+ imagebuildah: fix inheriting multi-stage builds
+ Make imagebuildah.BuildOptions.Architecture/OS optional
+ Make imagebuildah.BuildOptions.Jobs optional
+ Resolve a possible race in imagebuildah.Executor.startStage()
+ Switch scripts to use containers.conf
+ Bump openshift/imagebuilder to v1.1.6
+ Bump go.etcd.io/bbolt from 1.3.4 to 1.3.5
+ buildah, bud: support --jobs=N for parallel execution
+ executor: refactor build code inside new function
+ Add bud regression tests
+ Cirrus: Fix missing htpasswd in registry img
+ docs: clarify the 'triples' format
+ CHANGELOG.md: Fix markdown formatting
+ Add nix derivation for static builds
+ Bump to v1.16.0-dev
+ version centos7 for compatible
+
## v1.15.0 (2020-06-17)
Bump github.com/containers/common from 0.12.0 to 0.13.1
Bump github.com/containers/storage from 1.20.1 to 1.20.2
diff --git a/vendor/github.com/containers/buildah/add.go b/vendor/github.com/containers/buildah/add.go
index 425621028..1c1f116da 100644
--- a/vendor/github.com/containers/buildah/add.go
+++ b/vendor/github.com/containers/buildah/add.go
@@ -1,21 +1,25 @@
package buildah
import (
+ "archive/tar"
+ "fmt"
"io"
+ "io/ioutil"
"net/http"
"net/url"
"os"
"path"
"path/filepath"
"strings"
+ "sync"
"syscall"
"time"
+ "github.com/containers/buildah/copier"
"github.com/containers/buildah/pkg/chrootuser"
- "github.com/containers/buildah/util"
- "github.com/containers/storage/pkg/archive"
"github.com/containers/storage/pkg/fileutils"
"github.com/containers/storage/pkg/idtools"
+ "github.com/hashicorp/go-multierror"
"github.com/opencontainers/runtime-spec/specs-go"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
@@ -25,17 +29,22 @@ import (
type AddAndCopyOptions struct {
// Chown is a spec for the user who should be given ownership over the
// newly-added content, potentially overriding permissions which would
- // otherwise match those of local files and directories being copied.
+ // otherwise be set to 0:0.
Chown string
+ // PreserveOwnership, if Chown is not set, tells us to avoid setting
+ // ownership of copied items to 0:0, instead using whatever ownership
+ // information is already set. Not meaningful for remote sources.
+ PreserveOwnership bool
// All of the data being copied will pass through Hasher, if set.
// If the sources are URLs or files, their contents will be passed to
// Hasher.
// If the sources include directory trees, Hasher will be passed
// tar-format archives of the directory trees.
Hasher io.Writer
- // Excludes is the contents of the .dockerignore file
+ // Excludes is the contents of the .dockerignore file.
Excludes []string
- // ContextDir is the base directory for Excludes for content being copied
+ // ContextDir is the base directory for content being copied and
+ // Excludes patterns.
ContextDir string
// ID mapping options to use when contents to be copied are part of
// another container, and need ownerships to be mapped from the host to
@@ -44,74 +53,93 @@ type AddAndCopyOptions struct {
// DryRun indicates that the content should be digested, but not actually
// copied into the container.
DryRun bool
+ // Clear the setuid bit on items being copied. Has no effect on
+ // archives being extracted, where the bit is always preserved.
+ StripSetuidBit bool
+ // Clear the setgid bit on items being copied. Has no effect on
+ // archives being extracted, where the bit is always preserved.
+ StripSetgidBit bool
+ // Clear the sticky bit on items being copied. Has no effect on
+ // archives being extracted, where the bit is always preserved.
+ StripStickyBit bool
}
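
For orientation, a minimal sketch of how a caller might fill in these options and invoke Builder.Add; the context directory, patterns, and destination are hypothetical, and the *buildah.Builder is assumed to have been created elsewhere:

```go
package example

import (
	"github.com/containers/buildah"
)

// copyConfig copies "config/" from a hypothetical build context into the
// container's /etc, chowning the result to root:root and skipping any
// markdown files via a .dockerignore-style pattern.
func copyConfig(b *buildah.Builder) error {
	options := buildah.AddAndCopyOptions{
		Chown:      "0:0",            // ownership spec for the copied items
		ContextDir: "/tmp/context",   // base directory for sources and Excludes
		Excludes:   []string{"*.md"}, // .dockerignore-style patterns
	}
	// extract=false: archives among the sources are copied verbatim
	// rather than being unpacked into the destination.
	return b.Add("/etc/", false, options, "config/")
}
```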
-// addURL copies the contents of the source URL to the destination. This is
-// its own function so that deferred closes happen after we're done pulling
-// down each item of potentially many.
-func (b *Builder) addURL(destination, srcurl string, owner idtools.IDPair, hasher io.Writer, dryRun bool) error {
- resp, err := http.Get(srcurl)
+// sourceIsRemote returns true if "source" is a remote location.
+func sourceIsRemote(source string) bool {
+ return strings.HasPrefix(source, "http://") || strings.HasPrefix(source, "https://")
+}
+
+// getURL writes a tar archive containing the named content
+func getURL(src, mountpoint, renameTarget string, writer io.Writer) error {
+ url, err := url.Parse(src)
if err != nil {
- return errors.Wrapf(err, "error getting %q", srcurl)
+ return errors.Wrapf(err, "error parsing URL %q", url)
}
- defer resp.Body.Close()
-
- thisHasher := hasher
- if thisHasher != nil && b.ContentDigester.Hash() != nil {
- thisHasher = io.MultiWriter(thisHasher, b.ContentDigester.Hash())
+ response, err := http.Get(src)
+ if err != nil {
+ return errors.Wrapf(err, "error parsing URL %q", url)
}
- if thisHasher == nil {
- thisHasher = b.ContentDigester.Hash()
+ defer response.Body.Close()
+ // Figure out what to name the new content.
+ name := renameTarget
+ if name == "" {
+ name = path.Base(url.Path)
}
- thisWriter := thisHasher
-
- if !dryRun {
- logrus.Debugf("saving %q to %q", srcurl, destination)
- f, err := os.Create(destination)
+ // If there's a date on the content, use it. If not, use the Unix epoch
+ // for compatibility.
+ date := time.Unix(0, 0).UTC()
+ lastModified := response.Header.Get("Last-Modified")
+ if lastModified != "" {
+ d, err := time.Parse(time.RFC1123, lastModified)
if err != nil {
- return errors.Wrapf(err, "error creating %q", destination)
+ return errors.Wrapf(err, "error parsing last-modified time %q", lastModified)
}
+ date = d
+ }
+ // Figure out the size of the content.
+ size := response.ContentLength
+ responseBody := response.Body
+ if size < 0 {
+ // Create a temporary file and copy the content to it, so that
+ // we can figure out how much content there is.
+ f, err := ioutil.TempFile(mountpoint, "download")
+ if err != nil {
+ return errors.Wrapf(err, "error creating temporary file to hold %q", src)
+ }
+ defer os.Remove(f.Name())
defer f.Close()
- if err = f.Chown(owner.UID, owner.GID); err != nil {
- return errors.Wrapf(err, "error setting owner of %q to %d:%d", destination, owner.UID, owner.GID)
+ size, err = io.Copy(f, response.Body)
+ if err != nil {
+ return errors.Wrapf(err, "error writing %q to temporary file %q", src, f.Name())
}
- if last := resp.Header.Get("Last-Modified"); last != "" {
- if mtime, err2 := time.Parse(time.RFC1123, last); err2 != nil {
- logrus.Debugf("error parsing Last-Modified time %q: %v", last, err2)
- } else {
- defer func() {
- if err3 := os.Chtimes(destination, time.Now(), mtime); err3 != nil {
- logrus.Debugf("error setting mtime on %q to Last-Modified time %q: %v", destination, last, err3)
- }
- }()
- }
+ _, err = f.Seek(0, io.SeekStart)
+ if err != nil {
+ return errors.Wrapf(err, "error setting up to read %q from temporary file %q", src, f.Name())
}
- defer func() {
- if err2 := f.Chmod(0600); err2 != nil {
- logrus.Debugf("error setting permissions on %q: %v", destination, err2)
- }
- }()
- thisWriter = io.MultiWriter(f, thisWriter)
+ responseBody = f
}
-
- n, err := io.Copy(thisWriter, resp.Body)
- if err != nil {
- return errors.Wrapf(err, "error reading contents for %q from %q", destination, srcurl)
+ // Write the output archive. Set permissions for compatibility.
+ tw := tar.NewWriter(writer)
+ defer tw.Close()
+ hdr := tar.Header{
+ Typeflag: tar.TypeReg,
+ Name: name,
+ Size: size,
+ Mode: 0600,
+ ModTime: date,
}
- if resp.ContentLength >= 0 && n != resp.ContentLength {
- return errors.Errorf("error reading contents for %q from %q: wrong length (%d != %d)", destination, srcurl, n, resp.ContentLength)
+ err = tw.WriteHeader(&hdr)
+ if err != nil {
+ return errors.Wrapf(err, "error writing header")
}
- return nil
+ _, err = io.Copy(tw, responseBody)
+ return errors.Wrapf(err, "error writing content from %q to tar stream", src)
}
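
getURL always emits a single-entry tar stream, which Add() below consumes through an io.Pipe. A self-contained sketch of that producer/consumer shape, with invented content standing in for the HTTP response body:

```go
package main

import (
	"archive/tar"
	"fmt"
	"io"
	"io/ioutil"
	"strings"
	"time"
)

func main() {
	pr, pw := io.Pipe()
	// Producer: stream a one-entry archive, the same shape getURL writes.
	go func() {
		tw := tar.NewWriter(pw)
		body := "hello"
		hdr := tar.Header{
			Typeflag: tar.TypeReg,
			Name:     "index.html",
			Size:     int64(len(body)),
			Mode:     0600,
			ModTime:  time.Unix(0, 0).UTC(), // epoch fallback, as above
		}
		if err := tw.WriteHeader(&hdr); err != nil {
			pw.CloseWithError(err)
			return
		}
		if _, err := io.Copy(tw, strings.NewReader(body)); err != nil {
			pw.CloseWithError(err)
			return
		}
		pw.CloseWithError(tw.Close())
	}()
	// Consumer: read the archive back, much as copier.Put would.
	tr := tar.NewReader(pr)
	for {
		hdr, err := tr.Next()
		if err == io.EOF {
			break
		}
		if err != nil {
			panic(err)
		}
		fmt.Printf("entry %q, %d bytes\n", hdr.Name, hdr.Size)
		io.Copy(ioutil.Discard, tr)
	}
}
```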
// Add copies the contents of the specified sources into the container's root
// filesystem, optionally extracting contents of local files that look like
// non-empty archives.
-func (b *Builder) Add(destination string, extract bool, options AddAndCopyOptions, source ...string) error {
- excludes, err := dockerIgnoreMatcher(options.Excludes, options.ContextDir)
- if err != nil {
- return err
- }
+func (b *Builder) Add(destination string, extract bool, options AddAndCopyOptions, sources ...string) error {
mountPoint, err := b.Mount(b.MountLabel)
if err != nil {
return err
@@ -121,267 +149,336 @@ func (b *Builder) Add(destination string, extract bool, options AddAndCopyOption
logrus.Errorf("error unmounting container: %v", err2)
}
}()
- // Find out which user (and group) the destination should belong to.
- user, _, err := b.user(mountPoint, options.Chown)
- if err != nil {
- return err
- }
- containerOwner := idtools.IDPair{UID: int(user.UID), GID: int(user.GID)}
- hostUID, hostGID, err := util.GetHostIDs(b.IDMappingOptions.UIDMap, b.IDMappingOptions.GIDMap, user.UID, user.GID)
- if err != nil {
- return err
+
+ contextDir := options.ContextDir
+ if contextDir == "" {
+ contextDir = string(os.PathSeparator)
}
- hostOwner := idtools.IDPair{UID: int(hostUID), GID: int(hostGID)}
- dest := mountPoint
- if !options.DryRun {
- // Resolve the destination if it was specified as a relative path.
- if destination != "" && filepath.IsAbs(destination) {
- dir := filepath.Dir(destination)
- if dir != "." && dir != "/" {
- if err = idtools.MkdirAllAndChownNew(filepath.Join(dest, dir), 0755, hostOwner); err != nil {
- return errors.Wrapf(err, "error creating directory %q", filepath.Join(dest, dir))
- }
- }
- dest = filepath.Join(dest, destination)
- } else {
- if err = idtools.MkdirAllAndChownNew(filepath.Join(dest, b.WorkDir()), 0755, hostOwner); err != nil {
- return errors.Wrapf(err, "error creating directory %q", filepath.Join(dest, b.WorkDir()))
- }
- dest = filepath.Join(dest, b.WorkDir(), destination)
+
+ // Figure out what sorts of sources we have.
+ var localSources, remoteSources []string
+ for _, src := range sources {
+ if sourceIsRemote(src) {
+ remoteSources = append(remoteSources, src)
+ continue
}
- // If the destination was explicitly marked as a directory by ending it
- // with a '/', create it so that we can be sure that it's a directory,
- // and any files we're copying will be placed in the directory.
- if len(destination) > 0 && destination[len(destination)-1] == os.PathSeparator {
- if err = idtools.MkdirAllAndChownNew(dest, 0755, hostOwner); err != nil {
- return errors.Wrapf(err, "error creating directory %q", dest)
- }
+ localSources = append(localSources, src)
+ }
+
+ // Check how many items our local source specs matched. Each spec
+ // should have matched at least one item, otherwise we consider it an
+ // error.
+ var localSourceStats []*copier.StatsForGlob
+ if len(localSources) > 0 {
+ statOptions := copier.StatOptions{
+ CheckForArchives: extract,
}
- // Make sure the destination's parent directory is usable.
- if destpfi, err2 := os.Stat(filepath.Dir(dest)); err2 == nil && !destpfi.IsDir() {
- return errors.Errorf("%q already exists, but is not a subdirectory)", filepath.Dir(dest))
+ localSourceStats, err = copier.Stat(contextDir, contextDir, statOptions, localSources)
+ if err != nil {
+ return errors.Wrapf(err, "error checking on sources %v under %q", localSources, contextDir)
}
}
- // Now look at the destination itself.
- destfi, err := os.Stat(dest)
- if err != nil {
- if !os.IsNotExist(err) {
- return errors.Wrapf(err, "couldn't determine what %q is", dest)
+ numLocalSourceItems := 0
+ for _, localSourceStat := range localSourceStats {
+ if localSourceStat.Error != "" {
+ errorText := localSourceStat.Error
+ rel, err := filepath.Rel(contextDir, localSourceStat.Glob)
+ if err != nil {
+ errorText = fmt.Sprintf("%v; %s", err, errorText)
+ }
+ if strings.HasPrefix(rel, ".."+string(os.PathSeparator)) {
+ errorText = fmt.Sprintf("possible escaping context directory error: %s", errorText)
+ }
+ return errors.Errorf("error checking on source %v under %q: %v", localSourceStat.Glob, contextDir, errorText)
}
- destfi = nil
- }
- if len(source) > 1 && (destfi == nil || !destfi.IsDir()) {
- return errors.Errorf("destination %q is not a directory", dest)
+ if len(localSourceStat.Globbed) == 0 {
+ return errors.Wrapf(syscall.ENOENT, "error checking on source %v under %q: no glob matches", localSourceStat.Glob, contextDir)
+ }
+ numLocalSourceItems += len(localSourceStat.Globbed)
}
- copyFileWithTar := b.copyFileWithTar(options.IDMappingOptions, &containerOwner, options.Hasher, options.DryRun)
- copyWithTar := b.copyWithTar(options.IDMappingOptions, &containerOwner, options.Hasher, options.DryRun)
- untarPath := b.untarPath(nil, options.Hasher, options.DryRun)
- err = b.addHelper(excludes, extract, dest, destfi, hostOwner, options, copyFileWithTar, copyWithTar, untarPath, source...)
- if err != nil {
- return err
+ if numLocalSourceItems+len(remoteSources) == 0 {
+ return errors.Wrapf(syscall.ENOENT, "no sources %v found", sources)
}
- return nil
-}
-// user returns the user (and group) information which the destination should belong to.
-func (b *Builder) user(mountPoint string, userspec string) (specs.User, string, error) {
- if userspec == "" {
- userspec = b.User()
+ // Find out which user (and group) the destination should belong to.
+ var chownDirs, chownFiles *idtools.IDPair
+ var chmodDirs, chmodFiles *os.FileMode
+ var user specs.User
+ if options.Chown != "" {
+ user, _, err = b.user(mountPoint, options.Chown)
+ if err != nil {
+ return errors.Wrapf(err, "error looking up UID/GID for %q", options.Chown)
+ }
+ }
+ chownDirs = &idtools.IDPair{UID: int(user.UID), GID: int(user.GID)}
+ chownFiles = &idtools.IDPair{UID: int(user.UID), GID: int(user.GID)}
+ if options.Chown == "" && options.PreserveOwnership {
+ chownDirs = nil
+ chownFiles = nil
}
- uid, gid, homeDir, err := chrootuser.GetUser(mountPoint, userspec)
- u := specs.User{
- UID: uid,
- GID: gid,
- Username: userspec,
+ // If we have a single source archive to extract, or more than one
+ // source item, or the destination has a path separator at the end of
+ // it, and it's not a remote URL, the destination needs to be a
+ // directory.
+ if destination == "" || !filepath.IsAbs(destination) {
+ tmpDestination := filepath.Join(string(os.PathSeparator)+b.WorkDir(), destination)
+ if destination == "" || strings.HasSuffix(destination, string(os.PathSeparator)) {
+ destination = tmpDestination + string(os.PathSeparator)
+ } else {
+ destination = tmpDestination
+ }
}
- if !strings.Contains(userspec, ":") {
- groups, err2 := chrootuser.GetAdditionalGroupsForUser(mountPoint, uint64(u.UID))
- if err2 != nil {
- if errors.Cause(err2) != chrootuser.ErrNoSuchUser && err == nil {
- err = err2
+ destMustBeDirectory := (len(sources) > 1) || strings.HasSuffix(destination, string(os.PathSeparator))
+ destCanBeFile := false
+ if len(sources) == 1 {
+ if len(remoteSources) == 1 {
+ destCanBeFile = sourceIsRemote(sources[0])
+ }
+ if len(localSources) == 1 {
+ item := localSourceStats[0].Results[localSourceStats[0].Globbed[0]]
+ if item.IsDir || (item.IsArchive && extract) {
+ destMustBeDirectory = true
+ }
+ if item.IsRegular {
+ destCanBeFile = true
}
- } else {
- u.AdditionalGids = groups
}
-
}
- return u, homeDir, err
-}
-// dockerIgnoreMatcher returns a matcher based on the contents of the .dockerignore file under contextDir
-func dockerIgnoreMatcher(lines []string, contextDir string) (*fileutils.PatternMatcher, error) {
- // if there's no context dir, there's no .dockerignore file to consult
- if contextDir == "" {
- return nil, nil
+ // We care if the destination either doesn't exist, or exists and is a
+ // file. If the source can be a single file, for those cases we treat
+ // the destination as a file rather than as a directory tree.
+ renameTarget := ""
+ extractDirectory := filepath.Join(mountPoint, destination)
+ statOptions := copier.StatOptions{
+ CheckForArchives: extract,
}
- // If there's no .dockerignore file, then we don't have to add a
- // pattern to tell copy logic to ignore it later.
- var patterns []string
- if _, err := os.Stat(filepath.Join(contextDir, ".dockerignore")); err == nil || !os.IsNotExist(err) {
- patterns = []string{".dockerignore"}
+ destStats, err := copier.Stat(mountPoint, filepath.Join(mountPoint, b.WorkDir()), statOptions, []string{extractDirectory})
+ if err != nil {
+ return errors.Wrapf(err, "error checking on destination %v", extractDirectory)
}
- for _, ignoreSpec := range lines {
- ignoreSpec = strings.TrimSpace(ignoreSpec)
- // ignore comments passed back from .dockerignore
- if ignoreSpec == "" || ignoreSpec[0] == '#' {
- continue
- }
- // if the spec starts with '!' it means the pattern
- // should be included. make a note so that we can move
- // it to the front of the updated pattern, and insert
- // the context dir's path in between
- includeFlag := ""
- if strings.HasPrefix(ignoreSpec, "!") {
- includeFlag = "!"
- ignoreSpec = ignoreSpec[1:]
- }
- if ignoreSpec == "" {
- continue
- }
- patterns = append(patterns, includeFlag+filepath.Join(contextDir, ignoreSpec))
+ if (len(destStats) == 0 || len(destStats[0].Globbed) == 0) && !destMustBeDirectory && destCanBeFile {
+ // destination doesn't exist - extract to parent and rename the incoming file to the destination's name
+ renameTarget = filepath.Base(extractDirectory)
+ extractDirectory = filepath.Dir(extractDirectory)
}
- // if there are no patterns, save time by not constructing the object
- if len(patterns) == 0 {
- return nil, nil
+ if len(destStats) == 1 && len(destStats[0].Globbed) == 1 && destStats[0].Results[destStats[0].Globbed[0]].IsRegular {
+ if destMustBeDirectory {
+ return errors.Errorf("destination %v already exists but is not a directory", destination)
+ }
+ // destination exists - it's a file, we need to extract to parent and rename the incoming file to the destination's name
+ renameTarget = filepath.Base(extractDirectory)
+ extractDirectory = filepath.Dir(extractDirectory)
}
- // return a matcher object
- matcher, err := fileutils.NewPatternMatcher(patterns)
+
+ pm, err := fileutils.NewPatternMatcher(options.Excludes)
if err != nil {
- return nil, errors.Wrapf(err, "error creating file matcher using patterns %v", patterns)
+ return errors.Wrapf(err, "error processing excludes list %v", options.Excludes)
}
- return matcher, nil
-}
-func (b *Builder) addHelper(excludes *fileutils.PatternMatcher, extract bool, dest string, destfi os.FileInfo, hostOwner idtools.IDPair, options AddAndCopyOptions, copyFileWithTar, copyWithTar, untarPath func(src, dest string) error, source ...string) error {
- for n, src := range source {
- if strings.HasPrefix(src, "http://") || strings.HasPrefix(src, "https://") {
- b.ContentDigester.Start("")
- // We assume that source is a file, and we're copying
- // it to the destination. If the destination is
- // already a directory, create a file inside of it.
- // Otherwise, the destination is the file to which
- // we'll save the contents.
- url, err := url.Parse(src)
- if err != nil {
- return errors.Wrapf(err, "error parsing URL %q", src)
+ // Copy each source in turn.
+ var srcUIDMap, srcGIDMap []idtools.IDMap
+ if options.IDMappingOptions != nil {
+ srcUIDMap, srcGIDMap = convertRuntimeIDMaps(options.IDMappingOptions.UIDMap, options.IDMappingOptions.GIDMap)
+ }
+ destUIDMap, destGIDMap := convertRuntimeIDMaps(b.IDMappingOptions.UIDMap, b.IDMappingOptions.GIDMap)
+
+ for _, src := range sources {
+ var multiErr *multierror.Error
+ var getErr, closeErr, renameErr, putErr error
+ var wg sync.WaitGroup
+ if sourceIsRemote(src) {
+ pipeReader, pipeWriter := io.Pipe()
+ wg.Add(1)
+ go func() {
+ getErr = getURL(src, mountPoint, renameTarget, pipeWriter)
+ pipeWriter.Close()
+ wg.Done()
+ }()
+ wg.Add(1)
+ go func() {
+ b.ContentDigester.Start("")
+ hashCloser := b.ContentDigester.Hash()
+ hasher := io.Writer(hashCloser)
+ if options.Hasher != nil {
+ hasher = io.MultiWriter(hasher, options.Hasher)
+ }
+ if options.DryRun {
+ _, putErr = io.Copy(hasher, pipeReader)
+ } else {
+ putOptions := copier.PutOptions{
+ UIDMap: destUIDMap,
+ GIDMap: destGIDMap,
+ ChownDirs: chownDirs,
+ ChmodDirs: chmodDirs,
+ ChownFiles: chownFiles,
+ ChmodFiles: chmodFiles,
+ }
+ putErr = copier.Put(mountPoint, extractDirectory, putOptions, io.TeeReader(pipeReader, hasher))
+ }
+ hashCloser.Close()
+ pipeReader.Close()
+ wg.Done()
+ }()
+ wg.Wait()
+ if getErr != nil {
+ getErr = errors.Wrapf(getErr, "error reading %q", src)
}
- d := dest
- if destfi != nil && destfi.IsDir() {
- d = filepath.Join(dest, path.Base(url.Path))
+ if putErr != nil {
+ putErr = errors.Wrapf(putErr, "error storing %q", src)
}
- if err = b.addURL(d, src, hostOwner, options.Hasher, options.DryRun); err != nil {
- return err
+ multiErr = multierror.Append(getErr, putErr)
+ if multiErr != nil && multiErr.ErrorOrNil() != nil {
+ if len(multiErr.Errors) > 1 {
+ return multiErr.ErrorOrNil()
+ }
+ return multiErr.Errors[0]
}
continue
}
- glob, err := filepath.Glob(src)
- if err != nil {
- return errors.Wrapf(err, "invalid glob %q", src)
+ // Dig out the result of running glob+stat on this source spec.
+ var localSourceStat *copier.StatsForGlob
+ for _, st := range localSourceStats {
+ if st.Glob == src {
+ localSourceStat = st
+ break
+ }
}
- if len(glob) == 0 {
- return errors.Wrapf(syscall.ENOENT, "no files found matching %q", src)
+ if localSourceStat == nil {
+ return errors.Errorf("internal error: should have statted %s, but we didn't?", src)
}
- for _, gsrc := range glob {
- esrc, err := filepath.EvalSymlinks(gsrc)
+ // Iterate through every item that matched the glob.
+ itemsCopied := 0
+ for _, glob := range localSourceStat.Globbed {
+ rel, err := filepath.Rel(contextDir, glob)
if err != nil {
- return errors.Wrapf(err, "error evaluating symlinks %q", gsrc)
+ return errors.Wrapf(err, "error computing path of %q", glob)
}
- srcfi, err := os.Stat(esrc)
- if err != nil {
- return errors.Wrapf(err, "error reading %q", esrc)
+ if strings.HasPrefix(rel, ".."+string(os.PathSeparator)) {
+ return errors.Errorf("possible escaping context directory error: %q is outside of %q", glob, contextDir)
}
- if srcfi.IsDir() {
- b.ContentDigester.Start("dir")
- // The source is a directory, so copy the contents of
- // the source directory into the target directory. Try
- // to create it first, so that if there's a problem,
- // we'll discover why that won't work.
- if !options.DryRun {
- if err = idtools.MkdirAllAndChownNew(dest, 0755, hostOwner); err != nil {
- return errors.Wrapf(err, "error creating directory %q", dest)
- }
+ // Check for dockerignore-style exclusion of this item.
+ if rel != "." {
+ matches, err := pm.Matches(filepath.ToSlash(rel)) // nolint:staticcheck
+ if err != nil {
+ return errors.Wrapf(err, "error checking if %q(%q) is excluded", glob, rel)
}
- logrus.Debugf("copying[%d] %q to %q", n, esrc+string(os.PathSeparator)+"*", dest+string(os.PathSeparator)+"*")
-
- // Copy the whole directory because we do not exclude anything
- if excludes == nil {
- if err = copyWithTar(esrc, dest); err != nil {
- return errors.Wrapf(err, "error copying %q to %q", esrc, dest)
- }
+ if matches {
continue
}
- err := filepath.Walk(esrc, func(path string, info os.FileInfo, err error) error {
- if err != nil {
- return err
- }
-
- res, err := excludes.MatchesResult(path)
- if err != nil {
- return errors.Wrapf(err, "error checking if %s is an excluded path", path)
- }
- // The latest match result has the highest priority,
- // which means that we only skip the filepath if
- // the last result matched.
- if res.IsMatched() {
- return nil
- }
-
- // combine the source's basename with the dest directory
- fpath, err := filepath.Rel(esrc, path)
- if err != nil {
- return errors.Wrapf(err, "error converting %s to a path relative to %s", path, esrc)
- }
- if err = copyFileWithTar(path, filepath.Join(dest, fpath)); err != nil {
- return errors.Wrapf(err, "error copying %q to %q", path, dest)
- }
- return nil
- })
- if err != nil {
- return err
- }
- continue
}
-
- // This source is a file
- // Check if the path matches the .dockerignore
- if excludes != nil {
- res, err := excludes.MatchesResult(esrc)
- if err != nil {
- return errors.Wrapf(err, "error checking if %s is an excluded path", esrc)
+ st := localSourceStat.Results[glob]
+ pipeReader, pipeWriter := io.Pipe()
+ wg.Add(1)
+ go func() {
+ renamedItems := 0
+ writer := io.WriteCloser(pipeWriter)
+ if renameTarget != "" {
+ writer = newTarFilterer(writer, func(hdr *tar.Header) (bool, bool, io.Reader) {
+ hdr.Name = renameTarget
+ renamedItems++
+ return false, false, nil
+ })
}
- // Skip the file if the pattern matches
- if res.IsMatched() {
- continue
+ getOptions := copier.GetOptions{
+ UIDMap: srcUIDMap,
+ GIDMap: srcGIDMap,
+ Excludes: options.Excludes,
+ ExpandArchives: extract,
+ StripSetuidBit: options.StripSetuidBit,
+ StripSetgidBit: options.StripSetgidBit,
+ StripStickyBit: options.StripStickyBit,
}
- }
-
- b.ContentDigester.Start("file")
-
- if !extract || !archive.IsArchivePath(esrc) {
- // This source is a file, and either it's not an
- // archive, or we don't care whether or not it's an
- // archive.
- d := dest
- if destfi != nil && destfi.IsDir() {
- d = filepath.Join(dest, filepath.Base(gsrc))
+ getErr = copier.Get(contextDir, contextDir, getOptions, []string{glob}, writer)
+ closeErr = writer.Close()
+ if renameTarget != "" && renamedItems > 1 {
+ renameErr = errors.Errorf("internal error: renamed %d items when we expected to only rename 1", renamedItems)
+ }
+ wg.Done()
+ }()
+ wg.Add(1)
+ go func() {
+ if st.IsDir {
+ b.ContentDigester.Start("dir")
+ } else {
+ b.ContentDigester.Start("file")
}
- // Copy the file, preserving attributes.
- logrus.Debugf("copying[%d] %q to %q", n, esrc, d)
- if err = copyFileWithTar(esrc, d); err != nil {
- return errors.Wrapf(err, "error copying %q to %q", esrc, d)
+ hashCloser := b.ContentDigester.Hash()
+ hasher := io.Writer(hashCloser)
+ if options.Hasher != nil {
+ hasher = io.MultiWriter(hasher, options.Hasher)
+ }
+ if options.DryRun {
+ _, putErr = io.Copy(hasher, pipeReader)
+ } else {
+ putOptions := copier.PutOptions{
+ UIDMap: destUIDMap,
+ GIDMap: destGIDMap,
+ ChownDirs: chownDirs,
+ ChmodDirs: chmodDirs,
+ ChownFiles: chownFiles,
+ ChmodFiles: chmodFiles,
+ }
+ putErr = copier.Put(mountPoint, extractDirectory, putOptions, io.TeeReader(pipeReader, hasher))
}
- continue
+ hashCloser.Close()
+ pipeReader.Close()
+ wg.Done()
+ }()
+ wg.Wait()
+ if getErr != nil {
+ getErr = errors.Wrapf(getErr, "error reading %q", src)
}
-
- // We're extracting an archive into the destination directory.
- logrus.Debugf("extracting contents[%d] of %q into %q", n, esrc, dest)
- if err = untarPath(esrc, dest); err != nil {
- return errors.Wrapf(err, "error extracting %q into %q", esrc, dest)
+ if closeErr != nil {
+ closeErr = errors.Wrapf(closeErr, "error closing %q", src)
+ }
+ if renameErr != nil {
+ renameErr = errors.Wrapf(renameErr, "error renaming %q", src)
+ }
+ if putErr != nil {
+ putErr = errors.Wrapf(putErr, "error storing %q", src)
+ }
+ multiErr = multierror.Append(getErr, closeErr, renameErr, putErr)
+ if multiErr != nil && multiErr.ErrorOrNil() != nil {
+ if len(multiErr.Errors) > 1 {
+ return multiErr.ErrorOrNil()
+ }
+ return multiErr.Errors[0]
}
+ itemsCopied++
+ }
+ if itemsCopied == 0 {
+ return errors.Wrapf(syscall.ENOENT, "no items matching glob %q copied (%d filtered)", localSourceStat.Glob, len(localSourceStat.Globbed))
}
}
return nil
}
+
+// user returns the user (and group) information which the destination should belong to.
+func (b *Builder) user(mountPoint string, userspec string) (specs.User, string, error) {
+ if userspec == "" {
+ userspec = b.User()
+ }
+
+ uid, gid, homeDir, err := chrootuser.GetUser(mountPoint, userspec)
+ u := specs.User{
+ UID: uid,
+ GID: gid,
+ Username: userspec,
+ }
+ if !strings.Contains(userspec, ":") {
+ groups, err2 := chrootuser.GetAdditionalGroupsForUser(mountPoint, uint64(u.UID))
+ if err2 != nil {
+ if errors.Cause(err2) != chrootuser.ErrNoSuchUser && err == nil {
+ err = err2
+ }
+ } else {
+ u.AdditionalGids = groups
+ }
+
+ }
+ return u, homeDir, err
+}
diff --git a/vendor/github.com/containers/buildah/buildah.go b/vendor/github.com/containers/buildah/buildah.go
index f5be7efbd..d001b8a10 100644
--- a/vendor/github.com/containers/buildah/buildah.go
+++ b/vendor/github.com/containers/buildah/buildah.go
@@ -28,7 +28,7 @@ const (
Package = "buildah"
// Version for the Package. Bump version in contrib/rpm/buildah.spec
// too.
- Version = "1.16.0-dev"
+ Version = "1.16.1"
// The value we use to identify what type of information, currently a
// serialized Builder structure, we are using as per-container state.
// This should only be changed when we make incompatible changes to
diff --git a/vendor/github.com/containers/buildah/changelog.txt b/vendor/github.com/containers/buildah/changelog.txt
index ab0fd2415..ec5db6eac 100644
--- a/vendor/github.com/containers/buildah/changelog.txt
+++ b/vendor/github.com/containers/buildah/changelog.txt
@@ -1,3 +1,116 @@
+
+- Changelog for v1.16.1 (2020-09-10)
+ * copier.Get(): hard link targets shouldn't be relative paths
+
+- Changelog for v1.16.0 (2020-09-03)
+ * fix build on 32bit arches
+ * containerImageRef.NewImageSource(): don't always force timestamps
+ * Add fuse module warning to image readme
+ * Heed our retry delay option values when retrying commit/pull/push
+ * Switch to containers/common for seccomp
+ * Use --timestamp rather than --omit-timestamp
+ * docs: remove outdated notice
+ * docs: remove outdated notice
+ * build-using-dockerfile: add a hidden --log-rusage flag
+ * build(deps): bump github.com/containers/image/v5 from 5.5.1 to 5.5.2
+ * Discard ReportWriter if user sets options.Quiet
+ * build(deps): bump github.com/containers/common from 0.19.0 to 0.20.3
+ * Fix ownership of content copied using COPY --from
+ * newTarDigester: zero out timestamps in tar headers
+ * Update nix pin with `make nixpkgs`
+ * bud.bats: correct .dockerignore integration tests
+ * Use pipes for copying
+ * run: include stdout in error message
+ * run: use the correct error for errors.Wrapf
+ * copier: un-export internal types
+ * copier: add Mkdir()
+ * in_podman: don't get tripped up by $CIRRUS_CHANGE_TITLE
+ * docs/buildah-commit.md: tweak some wording, add a --rm example
+ * imagebuildah: don’t blank out destination names when COPYing
+ * Replace retry functions with common/pkg/retry
+ * StageExecutor.historyMatches: compare timestamps using .Equal
+ * Update vendor of containers/common
+ * Fix errors found in coverity scan
+ * Change namespace handling flags to better match podman commands
+ * conformance testing: ignore buildah.BuilderIdentityAnnotation labels
+ * Vendor in containers/storage v1.23.0
+ * Add buildah.IsContainer interface
+ * Avoid feeding run_buildah to pipe
+ * fix(buildahimage): add xz dependency in buildah image
+ * Bump github.com/containers/common from 0.15.2 to 0.18.0
+ * Howto for rootless image building from OpenShift
+ * Add --omit-timestamp flag to buildah bud
+ * Update nix pin with `make nixpkgs`
+ * Shutdown storage on failures
+ * Handle COPY --from when an argument is used
+ * Bump github.com/seccomp/containers-golang from 0.5.0 to 0.6.0
+ * Cirrus: Use newly built VM images
+ * Bump github.com/opencontainers/runc from 1.0.0-rc91 to 1.0.0-rc92
+ * Enhance the .dockerignore man pages
+ * conformance: add a test for COPY from subdirectory
+ * fix bug in manifest inspect
+ * Add documentation for .dockerignore
+ * Add BuilderIdentityAnnotation to identify buildah version
+ * DOC: Add quay.io/containers/buildah image to README.md
+ * Update buildahimages readme
+ * fix spelling mistake in "info" command result display
+ * Don't bind /etc/host and /etc/resolv.conf if network is not present
+ * blobcache: avoid an unnecessary NewImage()
+ * Build static binary with `buildGoModule`
+ * copier: split StripSetidBits into StripSetuidBit/StripSetgidBit/StripStickyBit
+ * tarFilterer: handle multiple archives
+ * Fix a race we hit during conformance tests
+ * Rework conformance testing
+ * Update 02-registries-repositories.md
+ * test-unit: invoke cmd/buildah tests with --flags
+ * parse: fix a type mismatch in a test
+ * Fix compilation of tests/testreport/testreport
+ * build.sh: log the version of Go that we're using
+ * test-unit: increase the test timeout to 40/45 minutes
+ * Add the "copier" package
+ * Fix & add notes regarding problematic language in codebase
+ * Add dependency on github.com/stretchr/testify/require
+ * CompositeDigester: add the ability to filter tar streams
+ * BATS tests: make more robust
+ * vendor golang.org/x/text@v0.3.3
+ * Switch golang 1.12 to golang 1.13
+ * imagebuildah: wait for stages that might not have even started yet
+ * chroot, run: not fail on bind mounts from /sys
+ * chroot: do not use setgroups if it is blocked
+ * Set engine env from containers.conf
+ * imagebuildah: return the right stage's image as the "final" image
+ * Fix a help string
+ * Deduplicate environment variables
+ * switch containers/libpod to containers/podman
+ * Bump github.com/containers/ocicrypt from 1.0.2 to 1.0.3
+ * Bump github.com/opencontainers/selinux from 1.5.2 to 1.6.0
+ * Mask out /sys/dev to prevent information leak
+ * linux: skip errors from the runtime kill
+ * Mask over the /sys/fs/selinux in mask branch
+ * Add VFS additional image store to container
+ * tests: add auth tests
+ * Allow "readonly" as alias to "ro" in mount options
+ * Ignore OS X specific consistency mount option
+ * Bump github.com/onsi/ginkgo from 1.13.0 to 1.14.0
+ * Bump github.com/containers/common from 0.14.0 to 0.15.2
+ * Rootless Buildah should default to IsolationOCIRootless
+ * imagebuildah: fix inheriting multi-stage builds
+ * Make imagebuildah.BuildOptions.Architecture/OS optional
+ * Make imagebuildah.BuildOptions.Jobs optional
+ * Resolve a possible race in imagebuildah.Executor.startStage()
+ * Switch scripts to use containers.conf
+ * Bump openshift/imagebuilder to v1.1.6
+ * Bump go.etcd.io/bbolt from 1.3.4 to 1.3.5
+ * buildah, bud: support --jobs=N for parallel execution
+ * executor: refactor build code inside new function
+ * Add bud regression tests
+ * Cirrus: Fix missing htpasswd in registry img
+ * docs: clarify the 'triples' format
+ * CHANGELOG.md: Fix markdown formatting
+ * Add nix derivation for static builds
+ * Bump to v1.16.0-dev
+ * add version centos7 for compatible
+
- Changelog for v1.15.0 (2020-06-17)
* Bump github.com/containers/common from 0.12.0 to 0.13.1
* Bump github.com/containers/storage from 1.20.1 to 1.20.2
diff --git a/vendor/github.com/containers/buildah/commit.go b/vendor/github.com/containers/buildah/commit.go
index 6c3febd5d..38601fbad 100644
--- a/vendor/github.com/containers/buildah/commit.go
+++ b/vendor/github.com/containers/buildah/commit.go
@@ -79,6 +79,7 @@ type CommitOptions struct {
EmptyLayer bool
// OmitTimestamp forces epoch 0 as created timestamp to allow for
// deterministic, content-addressable builds.
+ // Deprecated: use HistoryTimestamp instead.
OmitTimestamp bool
// SignBy is the fingerprint of a GPG key to use for signing the image.
SignBy string
@@ -231,6 +232,13 @@ func (b *Builder) Commit(ctx context.Context, dest types.ImageReference, options
// want to compute here because we'll have to do it again when
// cp.Image() instantiates a source image, and we don't want to do the
// work twice.
+ if options.OmitTimestamp {
+ if options.HistoryTimestamp != nil {
+ return imgID, nil, "", errors.Errorf("OmitTimestamp and HistoryTimestamp cannot be used together")
+ }
+ timestamp := time.Unix(0, 0).UTC()
+ options.HistoryTimestamp = &timestamp
+ }
nameToRemove := ""
if dest == nil {
nameToRemove = stringid.GenerateRandomID() + "-tmp"
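
The deprecated flag is thus translated into a pinned HistoryTimestamp. A sketch of the equivalent, non-deprecated call a caller could make directly (the builder and destination reference are assumed to exist):

```go
package example

import (
	"context"
	"time"

	"github.com/containers/buildah"
	"github.com/containers/image/v5/types"
)

// commitReproducibly pins HistoryTimestamp to the Unix epoch itself,
// which is exactly what the OmitTimestamp shim above now does.
func commitReproducibly(ctx context.Context, b *buildah.Builder, dest types.ImageReference) (string, error) {
	epoch := time.Unix(0, 0).UTC()
	imgID, _, _, err := b.Commit(ctx, dest, buildah.CommitOptions{
		HistoryTimestamp: &epoch,
	})
	return imgID, err
}
```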
@@ -344,7 +352,7 @@ func (b *Builder) Commit(ctx context.Context, dest types.ImageReference, options
}
var manifestBytes []byte
- if manifestBytes, err = retryCopyImage(ctx, policyContext, maybeCachedDest, maybeCachedSrc, dest, "push", getCopyOptions(b.store, options.ReportWriter, nil, systemContext, "", false, options.SignBy, options.OciEncryptLayers, options.OciEncryptConfig, nil), options.MaxRetries, options.RetryDelay); err != nil {
+ if manifestBytes, err = retryCopyImage(ctx, policyContext, maybeCachedDest, maybeCachedSrc, dest, getCopyOptions(b.store, options.ReportWriter, nil, systemContext, "", false, options.SignBy, options.OciEncryptLayers, options.OciEncryptConfig, nil), options.MaxRetries, options.RetryDelay); err != nil {
return imgID, nil, "", errors.Wrapf(err, "error copying layers and metadata for container %q", b.ContainerID)
}
// If we've got more names to attach, and we know how to do that for
@@ -476,7 +484,7 @@ func Push(ctx context.Context, image string, dest types.ImageReference, options
systemContext.DirForceCompress = true
}
var manifestBytes []byte
- if manifestBytes, err = retryCopyImage(ctx, policyContext, dest, maybeCachedSrc, dest, "push", getCopyOptions(options.Store, options.ReportWriter, nil, systemContext, options.ManifestType, options.RemoveSignatures, options.SignBy, options.OciEncryptLayers, options.OciEncryptConfig, nil), options.MaxRetries, options.RetryDelay); err != nil {
+ if manifestBytes, err = retryCopyImage(ctx, policyContext, dest, maybeCachedSrc, dest, getCopyOptions(options.Store, options.ReportWriter, nil, systemContext, options.ManifestType, options.RemoveSignatures, options.SignBy, options.OciEncryptLayers, options.OciEncryptConfig, nil), options.MaxRetries, options.RetryDelay); err != nil {
return nil, "", errors.Wrapf(err, "error copying layers and metadata from %q to %q", transports.ImageName(maybeCachedSrc), transports.ImageName(dest))
}
if options.ReportWriter != nil {
diff --git a/vendor/github.com/containers/buildah/common.go b/vendor/github.com/containers/buildah/common.go
index b43cfffc9..594362300 100644
--- a/vendor/github.com/containers/buildah/common.go
+++ b/vendor/github.com/containers/buildah/common.go
@@ -3,13 +3,11 @@ package buildah
import (
"context"
"io"
- "net"
- "net/url"
"os"
"path/filepath"
- "syscall"
"time"
+ "github.com/containers/common/pkg/retry"
cp "github.com/containers/image/v5/copy"
"github.com/containers/image/v5/docker"
"github.com/containers/image/v5/signature"
@@ -17,11 +15,6 @@ import (
encconfig "github.com/containers/ocicrypt/config"
"github.com/containers/storage"
"github.com/containers/storage/pkg/unshare"
- "github.com/docker/distribution/registry/api/errcode"
- errcodev2 "github.com/docker/distribution/registry/api/v2"
- multierror "github.com/hashicorp/go-multierror"
- "github.com/pkg/errors"
- "github.com/sirupsen/logrus"
)
const (
@@ -76,64 +69,22 @@ func getSystemContext(store storage.Store, defaults *types.SystemContext, signat
return sc
}
-func isRetryable(err error) bool {
- err = errors.Cause(err)
- type unwrapper interface {
- Unwrap() error
- }
- if unwrapper, ok := err.(unwrapper); ok {
- err = unwrapper.Unwrap()
- return isRetryable(err)
- }
- if registryError, ok := err.(errcode.Error); ok {
- switch registryError.Code {
- case errcode.ErrorCodeUnauthorized, errcodev2.ErrorCodeNameUnknown, errcodev2.ErrorCodeManifestUnknown:
- return false
- }
- return true
- }
- if op, ok := err.(*net.OpError); ok {
- return isRetryable(op.Err)
- }
- if url, ok := err.(*url.Error); ok {
- return isRetryable(url.Err)
- }
- if errno, ok := err.(syscall.Errno); ok {
- if errno == syscall.ECONNREFUSED {
- return false
- }
- }
- if errs, ok := err.(errcode.Errors); ok {
- // if this error is a group of errors, process them all in turn
- for i := range errs {
- if !isRetryable(errs[i]) {
- return false
- }
- }
- }
- if errs, ok := err.(*multierror.Error); ok {
- // if this error is a group of errors, process them all in turn
- for i := range errs.Errors {
- if !isRetryable(errs.Errors[i]) {
- return false
- }
- }
- }
- return true
-}
-
-func retryCopyImage(ctx context.Context, policyContext *signature.PolicyContext, dest, src, registry types.ImageReference, action string, copyOptions *cp.Options, maxRetries int, retryDelay time.Duration) ([]byte, error) {
- manifestBytes, err := cp.Image(ctx, policyContext, dest, src, copyOptions)
- for retries := 0; err != nil && isRetryable(err) && registry != nil && registry.Transport().Name() == docker.Transport.Name() && retries < maxRetries; retries++ {
- if retryDelay == 0 {
- retryDelay = 5 * time.Second
- }
- logrus.Infof("Warning: %s failed, retrying in %s ... (%d/%d)", action, retryDelay, retries+1, maxRetries)
- time.Sleep(retryDelay)
+func retryCopyImage(ctx context.Context, policyContext *signature.PolicyContext, dest, src, registry types.ImageReference, copyOptions *cp.Options, maxRetries int, retryDelay time.Duration) ([]byte, error) {
+ var (
+ manifestBytes []byte
+ err error
+ lastErr error
+ )
+ err = retry.RetryIfNecessary(ctx, func() error {
manifestBytes, err = cp.Image(ctx, policyContext, dest, src, copyOptions)
- if err == nil {
- break
+ if registry != nil && registry.Transport().Name() != docker.Transport.Name() {
+ lastErr = err
+ return nil
}
+ return err
+ }, &retry.RetryOptions{MaxRetry: maxRetries, Delay: retryDelay})
+ if lastErr != nil {
+ err = lastErr
}
return manifestBytes, err
}
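
The hand-rolled isRetryable check and backoff loop are replaced by containers/common's retry package. A standalone sketch of RetryIfNecessary with an invented, transiently failing operation (generic errors are treated as retryable, as they were by the removed isRetryable):

```go
package main

import (
	"context"
	"fmt"
	"time"

	"github.com/containers/common/pkg/retry"
)

func main() {
	attempts := 0
	// Re-run the operation until it succeeds or MaxRetry attempts have
	// been made, sleeping Delay between tries, mirroring retryCopyImage.
	err := retry.RetryIfNecessary(context.Background(), func() error {
		attempts++
		if attempts < 3 {
			return fmt.Errorf("transient failure %d", attempts)
		}
		return nil
	}, &retry.RetryOptions{MaxRetry: 4, Delay: 100 * time.Millisecond})
	fmt.Println(attempts, err) // 3 <nil>
}
```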
diff --git a/vendor/github.com/containers/buildah/copier/copier.go b/vendor/github.com/containers/buildah/copier/copier.go
new file mode 100644
index 000000000..a980fe292
--- /dev/null
+++ b/vendor/github.com/containers/buildah/copier/copier.go
@@ -0,0 +1,1526 @@
+package copier
+
+import (
+ "archive/tar"
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "strconv"
+ "strings"
+ "sync"
+ "syscall"
+ "time"
+
+ "github.com/containers/buildah/util"
+ "github.com/containers/image/v5/pkg/compression"
+ "github.com/containers/storage/pkg/fileutils"
+ "github.com/containers/storage/pkg/idtools"
+ "github.com/containers/storage/pkg/reexec"
+ "github.com/pkg/errors"
+ "github.com/sirupsen/logrus"
+)
+
+const (
+ copierCommand = "buildah-copier"
+ maxLoopsFollowed = 64
+ // See http://pubs.opengroup.org/onlinepubs/9699919799/utilities/pax.html#tag_20_92_13_06, from archive/tar
+ cISUID = 04000 // Set uid, from archive/tar
+ cISGID = 02000 // Set gid, from archive/tar
+ cISVTX = 01000 // Save text (sticky bit), from archive/tar
+)
+
+func init() {
+ reexec.Register(copierCommand, copierMain)
+}
+
+// isArchivePath returns true if the specified path can be read like a (possibly
+// compressed) tarball.
+func isArchivePath(path string) bool {
+ f, err := os.Open(path)
+ if err != nil {
+ return false
+ }
+ defer f.Close()
+ rc, _, err := compression.AutoDecompress(f)
+ if err != nil {
+ return false
+ }
+ defer rc.Close()
+ tr := tar.NewReader(rc)
+ _, err = tr.Next()
+ return err == nil
+}
+
+// requestType encodes exactly what kind of request this is.
+type requestType string
+
+const (
+ requestStat requestType = "STAT"
+ requestGet requestType = "GET"
+ requestPut requestType = "PUT"
+ requestMkdir requestType = "MKDIR"
+ requestQuit requestType = "QUIT"
+)
+
+// request encodes a single request.
+type request struct {
+ Request requestType
+ Root string // used by all requests
+ preservedRoot string
+ rootPrefix string // used to reconstruct paths being handed back to the caller
+ Directory string // used by all requests
+ preservedDirectory string
+ Globs []string `json:",omitempty"` // used by stat, get
+ preservedGlobs []string
+ StatOptions StatOptions `json:",omitempty"`
+ GetOptions GetOptions `json:",omitempty"`
+ PutOptions PutOptions `json:",omitempty"`
+ MkdirOptions MkdirOptions `json:",omitempty"`
+}
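
Requests cross the reexec boundary between the library and its helper process as JSON. Since request is unexported, the sketch below mirrors its exported fields in a stand-in struct purely to show the wire shape; the paths are hypothetical:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// wireRequest is a stand-in mirroring the exported fields of the
// unexported request type above.
type wireRequest struct {
	Request   string
	Root      string
	Directory string
	Globs     []string `json:",omitempty"`
}

func main() {
	b, _ := json.Marshal(wireRequest{
		Request:   "STAT",
		Root:      "/mnt/rootfs",
		Directory: "/",
		Globs:     []string{"/etc/*.conf"},
	})
	fmt.Println(string(b))
	// {"Request":"STAT","Root":"/mnt/rootfs","Directory":"/","Globs":["/etc/*.conf"]}
}
```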
+
+func (req *request) Excludes() []string {
+ switch req.Request {
+ case requestStat:
+ return req.StatOptions.Excludes
+ case requestGet:
+ return req.GetOptions.Excludes
+ case requestPut:
+ return nil
+ case requestMkdir:
+ return nil
+ case requestQuit:
+ return nil
+ default:
+ panic(fmt.Sprintf("not an implemented request type: %q", req.Request))
+ }
+}
+
+func (req *request) UIDMap() []idtools.IDMap {
+ switch req.Request {
+ case requestStat:
+ return nil
+ case requestGet:
+ return req.GetOptions.UIDMap
+ case requestPut:
+ return req.PutOptions.UIDMap
+ case requestMkdir:
+ return req.MkdirOptions.UIDMap
+ case requestQuit:
+ return nil
+ default:
+ panic(fmt.Sprintf("not an implemented request type: %q", req.Request))
+ }
+}
+
+func (req *request) GIDMap() []idtools.IDMap {
+ switch req.Request {
+ case requestStat:
+ return nil
+ case requestGet:
+ return req.GetOptions.GIDMap
+ case requestPut:
+ return req.PutOptions.GIDMap
+ case requestMkdir:
+ return req.MkdirOptions.GIDMap
+ case requestQuit:
+ return nil
+ default:
+ panic(fmt.Sprintf("not an implemented request type: %q", req.Request))
+ }
+}
+
+// response encodes a single response.
+type response struct {
+ Error string `json:",omitempty"`
+ Stat statResponse
+ Get getResponse
+ Put putResponse
+ Mkdir mkdirResponse
+}
+
+// statResponse encodes a response for a single Stat request.
+type statResponse struct {
+ Globs []*StatsForGlob
+}
+
+// StatsForGlob encodes results for a single glob pattern passed to Stat().
+type StatsForGlob struct {
+ Error string `json:",omitempty"` // error if the Glob pattern was malformed
+ Glob string // input pattern to which this result corresponds
+ Globbed []string // a slice of zero or more names that match the glob
+ Results map[string]*StatForItem // one for each Globbed value if there are any, or for Glob
+}
+
+// StatForItem encodes results for a single filesystem item, as returned by Stat().
+type StatForItem struct {
+ Error string `json:",omitempty"`
+ Name string
+ Size int64 // dereferenced value for symlinks
+ Mode os.FileMode // dereferenced value for symlinks
+ ModTime time.Time // dereferenced value for symlinks
+ IsSymlink bool
+ IsDir bool // dereferenced value for symlinks
+ IsRegular bool // dereferenced value for symlinks
+ IsArchive bool // dereferenced value for symlinks
+ ImmediateTarget string `json:",omitempty"` // raw link content
+}
+
+// getResponse encodes a response for a single Get request.
+type getResponse struct {
+}
+
+// putResponse encodes a response for a single Put request.
+type putResponse struct {
+}
+
+// mkdirResponse encodes a response for a single Mkdir request.
+type mkdirResponse struct {
+}
+
+// StatOptions controls parts of Stat()'s behavior.
+type StatOptions struct {
+ CheckForArchives bool // check for and populate the IsArchive bit in returned values
+ Excludes []string // contents to pretend don't exist, using the OS-specific path separator
+}
+
+// Stat globs the specified pattern in the specified directory and returns its
+// results.
+// If root and directory are both not specified, the current root directory is
+// used, and relative names in the globs list are treated as being relative to
+// the current working directory.
+// If root is specified and the current OS supports it, the stat() is performed
+// in a chrooted context. If the directory is specified as an absolute path,
+// it should either be the root directory or a subdirectory of the root
+// directory. Otherwise, the directory is treated as a path relative to the
+// root directory.
+// Relative names in the glob list are treated as being relative to the
+// directory.
+func Stat(root string, directory string, options StatOptions, globs []string) ([]*StatsForGlob, error) {
+ req := request{
+ Request: requestStat,
+ Root: root,
+ Directory: directory,
+ Globs: append([]string{}, globs...),
+ StatOptions: options,
+ }
+ resp, err := copier(nil, nil, req)
+ if err != nil {
+ return nil, err
+ }
+ if resp.Error != "" {
+ return nil, errors.New(resp.Error)
+ }
+ return resp.Stat.Globs, nil
+}
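
A usage sketch for Stat against a hypothetical mount point, walking the Globbed and Results fields defined above:

```go
package main

import (
	"fmt"

	"github.com/containers/buildah/copier"
)

func main() {
	options := copier.StatOptions{CheckForArchives: true}
	stats, err := copier.Stat("/mnt/rootfs", "/", options, []string{"/etc/*.conf"})
	if err != nil {
		panic(err)
	}
	for _, sg := range stats {
		for _, name := range sg.Globbed {
			item := sg.Results[name]
			fmt.Printf("%s: dir=%v archive=%v size=%d\n",
				name, item.IsDir, item.IsArchive, item.Size)
		}
	}
}
```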
+
+// GetOptions controls parts of Get()'s behavior.
+type GetOptions struct {
+ UIDMap, GIDMap []idtools.IDMap // map from hostIDs to containerIDs in the output archive
+ Excludes []string // contents to pretend don't exist, using the OS-specific path separator
+ ExpandArchives bool // extract the contents of named items that are archives
+ StripSetuidBit bool // strip the setuid bit off of items being copied. no effect on archives being extracted
+ StripSetgidBit bool // strip the setgid bit off of items being copied. no effect on archives being extracted
+ StripStickyBit bool // strip the sticky bit off of items being copied. no effect on archives being extracted
+ StripXattrs bool // don't record extended attributes of items being copied. no effect on archives being extracted
+ KeepDirectoryNames bool // don't strip the top directory's basename from the paths of items in subdirectories
+}
+
+// Get produces an archive containing items that match the specified glob
+// patterns and writes it to bulkWriter.
+// If neither root nor directory is specified, the current root directory is
+// used, and relative names in the globs list are treated as being relative to
+// the current working directory.
+// If root is specified and the current OS supports it, the contents are read
+// in a chrooted context. If the directory is specified as an absolute path,
+// it should either be the root directory or a subdirectory of the root
+// directory. Otherwise, the directory is treated as a path relative to the
+// root directory.
+// Relative names in the glob list are treated as being relative to the
+// directory.
+func Get(root string, directory string, options GetOptions, globs []string, bulkWriter io.Writer) error {
+ req := request{
+ Request: requestGet,
+ Root: root,
+ Directory: directory,
+ Globs: append([]string{}, globs...),
+ StatOptions: StatOptions{
+ CheckForArchives: options.ExpandArchives,
+ },
+ GetOptions: options,
+ }
+ resp, err := copier(nil, bulkWriter, req)
+ if err != nil {
+ return err
+ }
+ if resp.Error != "" {
+ return errors.New(resp.Error)
+ }
+ return nil
+}
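+
+// A minimal usage sketch for Get, streaming the archive into a local file;
+// the paths are hypothetical:
+//
+//  f, err := os.Create("/tmp/etc.tar")
+//  if err != nil {
+//      return err
+//  }
+//  defer f.Close()
+//  err = Get("/var/lib/mycontainer/rootfs", "", GetOptions{}, []string{"/etc"}, f)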
+
+// PutOptions controls parts of Put()'s behavior.
+type PutOptions struct {
+ UIDMap, GIDMap []idtools.IDMap // map from containerIDs to hostIDs when writing contents to disk
+ ChownDirs *idtools.IDPair // set ownership of newly-created directories
+ ChmodDirs *os.FileMode // set permissions on newly-created directories
+ ChownFiles *idtools.IDPair // set ownership of newly-created files
+ ChmodFiles *os.FileMode // set permissions on newly-created files
+ StripXattrs bool // don't bother trying to set extended attributes of items being copied
+ IgnoreXattrErrors bool // ignore any errors encountered when attempting to set extended attributes
+}
+
+// Put extracts an archive from the bulkReader at the specified directory.
+// If neither root nor directory is specified, the current root directory is
+// used.
+// If root is specified and the current OS supports it, the contents are written
+// in a chrooted context. If the directory is specified as an absolute path,
+// it should either be the root directory or a subdirectory of the root
+// directory. Otherwise, the directory is treated as a path relative to the
+// root directory.
+func Put(root string, directory string, options PutOptions, bulkReader io.Reader) error {
+ req := request{
+ Request: requestPut,
+ Root: root,
+ Directory: directory,
+ PutOptions: options,
+ }
+ resp, err := copier(bulkReader, nil, req)
+ if err != nil {
+ return err
+ }
+ if resp.Error != "" {
+ return errors.New(resp.Error)
+ }
+ return nil
+}
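+
+// A minimal usage sketch for Put, extracting a local tarball under the root;
+// the paths and the 0:0 ownership override are hypothetical:
+//
+//  f, err := os.Open("/tmp/etc.tar")
+//  if err != nil {
+//      return err
+//  }
+//  defer f.Close()
+//  owner := idtools.IDPair{UID: 0, GID: 0}
+//  err = Put("/var/lib/mycontainer/rootfs", "/etc", PutOptions{ChownDirs: &owner, ChownFiles: &owner}, f)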
+
+// MkdirOptions controls parts of Mkdir()'s behavior.
+type MkdirOptions struct {
+ UIDMap, GIDMap []idtools.IDMap // map from containerIDs to hostIDs when creating directories
+ ChownNew *idtools.IDPair // set ownership of newly-created directories
+ ChmodNew *os.FileMode // set permissions on newly-created directories
+}
+
+// Mkdir ensures that the specified directory exists. Any directories which
+// need to be created will be given the specified ownership and permissions.
+// If neither root nor directory is specified, the current root directory is
+// used.
+// If root is specified and the current OS supports it, the directory is
+// created in a chrooted context. If the directory is specified as an absolute
+// path, it should either be the root directory or a subdirectory of the root
+// directory. Otherwise, the directory is treated as a path relative to the
+// root directory.
+func Mkdir(root string, directory string, options MkdirOptions) error {
+ req := request{
+ Request: requestMkdir,
+ Root: root,
+ Directory: directory,
+ MkdirOptions: options,
+ }
+ resp, err := copier(nil, nil, req)
+ if err != nil {
+ return err
+ }
+ if resp.Error != "" {
+ return errors.New(resp.Error)
+ }
+ return nil
+}
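+
+// A minimal usage sketch for Mkdir; the paths and mode are hypothetical:
+//
+//  mode := os.FileMode(0755)
+//  err := Mkdir("/var/lib/mycontainer/rootfs", "/var/tmp/work", MkdirOptions{ChmodNew: &mode})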
+
+// cleanerReldirectory resolves the relative path candidate lexically, attempting
+// to ensure that when joined as a subdirectory of another directory, it does
+// not reference anything outside of that other directory.
+func cleanerReldirectory(candidate string) string {
+ cleaned := strings.TrimPrefix(filepath.Clean(string(os.PathSeparator)+candidate), string(os.PathSeparator))
+ if cleaned == "" {
+ return "."
+ }
+ return cleaned
+}
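+
+// For example, assuming "/" as the separator, cleanerReldirectory is purely
+// lexical:
+//
+//  cleanerReldirectory("a/b")       == "a/b"
+//  cleanerReldirectory("../../etc") == "etc"
+//  cleanerReldirectory("")          == "."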
+
+// convertToRelSubdirectory returns the path of directory, bound and relative
+// to root, as a relative path, or an error if that path can't be computed or
+// if the two directories are on different volumes.
+func convertToRelSubdirectory(root, directory string) (relative string, err error) {
+ if root == "" || !filepath.IsAbs(root) {
+ return "", errors.Errorf("expected root directory to be an absolute path, got %q", root)
+ }
+ if directory == "" || !filepath.IsAbs(directory) {
+ return "", errors.Errorf("expected directory to be an absolute path, got %q", root)
+ }
+ if filepath.VolumeName(root) != filepath.VolumeName(directory) {
+ return "", errors.Errorf("%q and %q are on different volumes", root, directory)
+ }
+ rel, err := filepath.Rel(root, directory)
+ if err != nil {
+ return "", errors.Wrapf(err, "error computing path of %q relative to %q", directory, root)
+ }
+ return cleanerReldirectory(rel), nil
+}
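+
+// For example, assuming Unix-style paths:
+//
+//  convertToRelSubdirectory("/a", "/a/b/c") == "b/c"
+//  convertToRelSubdirectory("/a", "/b")     == "b" (clamped under the root)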
+
+// currentVolumeRoot returns the root directory of the volume that contains
+// the current working directory.
+func currentVolumeRoot() (string, error) {
+ cwd, err := os.Getwd()
+ if err != nil {
+ return "", errors.Wrapf(err, "error getting current working directory")
+ }
+ return filepath.VolumeName(cwd) + string(os.PathSeparator), nil
+}
+
+// isVolumeRoot reports whether the candidate path is the root directory of
+// its volume.
+func isVolumeRoot(candidate string) (bool, error) {
+ abs, err := filepath.Abs(candidate)
+ if err != nil {
+ return false, errors.Wrapf(err, "error converting %q to an absolute path", candidate)
+ }
+ return abs == filepath.VolumeName(abs)+string(os.PathSeparator), nil
+}
+
+// looksLikeAbs reports whether candidate begins with a single path separator,
+// treating such paths as absolute even when filepath.IsAbs() (on Windows, for
+// example) would say otherwise.
+func looksLikeAbs(candidate string) bool {
+ return len(candidate) > 0 && candidate[0] == os.PathSeparator && (len(candidate) == 1 || candidate[1] != os.PathSeparator)
+}
+
+// copier fills in any missing Root and Directory values in the request, then
+// dispatches it, either in-process or, when chrooting is both needed and
+// supported, to a reexec'ed subprocess.
+func copier(bulkReader io.Reader, bulkWriter io.Writer, req request) (*response, error) {
+ if req.Directory == "" {
+ if req.Root == "" {
+ wd, err := os.Getwd()
+ if err != nil {
+ return nil, errors.Wrapf(err, "error getting current working directory")
+ }
+ req.Directory = wd
+ } else {
+ req.Directory = req.Root
+ }
+ }
+ if req.Root == "" {
+ root, err := currentVolumeRoot()
+ if err != nil {
+ return nil, errors.Wrapf(err, "error determining root of current volume")
+ }
+ req.Root = root
+ }
+ if filepath.IsAbs(req.Directory) {
+ _, err := convertToRelSubdirectory(req.Root, req.Directory)
+ if err != nil {
+ return nil, errors.Wrapf(err, "error rewriting %q to be relative to %q", req.Directory, req.Root)
+ }
+ }
+ isAlreadyRoot, err := isVolumeRoot(req.Root)
+ if err != nil {
+ return nil, errors.Wrapf(err, "error checking if %q is a root directory", req.Root)
+ }
+ if !isAlreadyRoot && canChroot {
+ return copierWithSubprocess(bulkReader, bulkWriter, req)
+ }
+ return copierWithoutSubprocess(bulkReader, bulkWriter, req)
+}
+
+func copierWithoutSubprocess(bulkReader io.Reader, bulkWriter io.Writer, req request) (*response, error) {
+ req.preservedRoot = req.Root
+ req.rootPrefix = string(os.PathSeparator)
+ req.preservedDirectory = req.Directory
+ req.preservedGlobs = append([]string{}, req.Globs...)
+ if !filepath.IsAbs(req.Directory) {
+ req.Directory = filepath.Join(req.Root, cleanerReldirectory(req.Directory))
+ }
+ absoluteGlobs := make([]string, 0, len(req.Globs))
+ for _, glob := range req.preservedGlobs {
+ if filepath.IsAbs(glob) {
+ relativeGlob, err := convertToRelSubdirectory(req.preservedRoot, glob)
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "error rewriting %q to be relative to %q: %v", glob, req.preservedRoot, err)
+ os.Exit(1)
+ }
+ absoluteGlobs = append(absoluteGlobs, filepath.Join(req.Root, string(os.PathSeparator)+relativeGlob))
+ } else {
+ absoluteGlobs = append(absoluteGlobs, filepath.Join(req.Directory, cleanerReldirectory(glob)))
+ }
+ }
+ req.Globs = absoluteGlobs
+ resp, cb, err := copierHandler(bulkReader, bulkWriter, req)
+ if err != nil {
+ return nil, err
+ }
+ if cb != nil {
+ if err = cb(); err != nil {
+ return nil, err
+ }
+ }
+ return resp, nil
+}
+
+// closeIfNotNilYet closes *f and sets it to nil, doing nothing if the normal
+// control flow has already closed and cleared it.
+func closeIfNotNilYet(f **os.File, what string) {
+ if f != nil && *f != nil {
+ err := (*f).Close()
+ *f = nil
+ if err != nil {
+ logrus.Debugf("error closing %s: %v", what, err)
+ }
+ }
+}
+
+func copierWithSubprocess(bulkReader io.Reader, bulkWriter io.Writer, req request) (resp *response, err error) {
+ if bulkReader == nil {
+ bulkReader = bytes.NewReader([]byte{})
+ }
+ if bulkWriter == nil {
+ bulkWriter = ioutil.Discard
+ }
+ cmd := reexec.Command(copierCommand)
+ stdinRead, stdinWrite, err := os.Pipe()
+ if err != nil {
+ return nil, errors.Wrapf(err, "pipe")
+ }
+ defer closeIfNotNilYet(&stdinRead, "stdin pipe reader")
+ defer closeIfNotNilYet(&stdinWrite, "stdin pipe writer")
+ encoder := json.NewEncoder(stdinWrite)
+ stdoutRead, stdoutWrite, err := os.Pipe()
+ if err != nil {
+ return nil, errors.Wrapf(err, "pipe")
+ }
+ defer closeIfNotNilYet(&stdoutRead, "stdout pipe reader")
+ defer closeIfNotNilYet(&stdoutWrite, "stdout pipe writer")
+ decoder := json.NewDecoder(stdoutRead)
+ bulkReaderRead, bulkReaderWrite, err := os.Pipe()
+ if err != nil {
+ return nil, errors.Wrapf(err, "pipe")
+ }
+ defer closeIfNotNilYet(&bulkReaderRead, "child bulk content reader pipe, read end")
+ defer closeIfNotNilYet(&bulkReaderWrite, "child bulk content reader pipe, write end")
+ bulkWriterRead, bulkWriterWrite, err := os.Pipe()
+ if err != nil {
+ return nil, errors.Wrapf(err, "pipe")
+ }
+ defer closeIfNotNilYet(&bulkWriterRead, "child bulk content writer pipe, read end")
+ defer closeIfNotNilYet(&bulkWriterWrite, "child bulk content writer pipe, write end")
+ cmd.Dir = "/"
+ cmd.Env = append([]string{fmt.Sprintf("LOGLEVEL=%d", logrus.GetLevel())}, os.Environ()...)
+
+ errorBuffer := bytes.Buffer{}
+ cmd.Stdin = stdinRead
+ cmd.Stdout = stdoutWrite
+ cmd.Stderr = &errorBuffer
+ cmd.ExtraFiles = []*os.File{bulkReaderRead, bulkWriterWrite}
+ if err = cmd.Start(); err != nil {
+ return nil, errors.Wrapf(err, "error starting subprocess")
+ }
+ cmdToWaitFor := cmd
+ defer func() {
+ if cmdToWaitFor != nil {
+ if err := cmdToWaitFor.Wait(); err != nil {
+ if errorBuffer.String() != "" {
+ logrus.Debug(errorBuffer.String())
+ }
+ }
+ }
+ }()
+ stdinRead.Close()
+ stdinRead = nil
+ stdoutWrite.Close()
+ stdoutWrite = nil
+ bulkReaderRead.Close()
+ bulkReaderRead = nil
+ bulkWriterWrite.Close()
+ bulkWriterWrite = nil
+ killAndReturn := func(err error, step string) (*response, error) { // nolint: unparam
+ if err2 := cmd.Process.Kill(); err2 != nil {
+ return nil, errors.Wrapf(err, "error killing subprocess: %v; %s", err2, step)
+ }
+ return nil, errors.Wrap(err, step)
+ }
+ if err = encoder.Encode(req); err != nil {
+ return killAndReturn(err, "error encoding request")
+ }
+ if err = decoder.Decode(&resp); err != nil {
+ return killAndReturn(err, "error decoding response")
+ }
+ if err = encoder.Encode(&request{Request: requestQuit}); err != nil {
+ return killAndReturn(err, "error encoding request")
+ }
+ stdinWrite.Close()
+ stdinWrite = nil
+ stdoutRead.Close()
+ stdoutRead = nil
+ var wg sync.WaitGroup
+ var readError, writeError error
+ wg.Add(1)
+ go func() {
+ _, writeError = io.Copy(bulkWriter, bulkWriterRead)
+ bulkWriterRead.Close()
+ bulkWriterRead = nil
+ wg.Done()
+ }()
+ wg.Add(1)
+ go func() {
+ _, readError = io.Copy(bulkReaderWrite, bulkReader)
+ bulkReaderWrite.Close()
+ bulkReaderWrite = nil
+ wg.Done()
+ }()
+ wg.Wait()
+ cmdToWaitFor = nil
+ if err = cmd.Wait(); err != nil {
+ if errorBuffer.String() != "" {
+ err = fmt.Errorf("%s", errorBuffer.String())
+ }
+ return nil, err
+ }
+ if cmd.ProcessState.Exited() && !cmd.ProcessState.Success() {
+ err = fmt.Errorf("subprocess exited with error")
+ if errorBuffer.String() != "" {
+ err = fmt.Errorf("%s", errorBuffer.String())
+ }
+ return nil, err
+ }
+ if readError != nil {
+ return nil, errors.Wrapf(readError, "error passing bulk input to subprocess")
+ }
+ if writeError != nil {
+ return nil, errors.Wrapf(writeError, "error passing bulk output from subprocess")
+ }
+ return resp, nil
+}
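+
+// For reference, the wire layout that copierWithSubprocess sets up: requests
+// are JSON-encoded on the child's stdin, responses are JSON-decoded from its
+// stdout, and the two ExtraFiles pipes become descriptors 3 and 4 in the
+// child, carrying the bulk tar streams in and out, respectively.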
+
+// copierMain runs in the reexec'ed subprocess: it reads JSON-encoded requests
+// from stdin, chroots if requested, handles each request, and writes
+// JSON-encoded responses to stdout.
+func copierMain() {
+ var chrooted bool
+ decoder := json.NewDecoder(os.Stdin)
+ encoder := json.NewEncoder(os.Stdout)
+ previousRequestRoot := ""
+
+ // Set logging.
+ if level := os.Getenv("LOGLEVEL"); level != "" {
+ if ll, err := strconv.Atoi(level); err == nil {
+ logrus.SetLevel(logrus.Level(ll))
+ }
+ }
+
+ // Set up descriptors for receiving and sending tarstreams.
+ bulkReader := os.NewFile(3, "bulk-reader")
+ bulkWriter := os.NewFile(4, "bulk-writer")
+
+ for {
+ // Read a request.
+ req := new(request)
+ if err := decoder.Decode(req); err != nil {
+ fmt.Fprintf(os.Stderr, "error decoding request: %v", err)
+ os.Exit(1)
+ }
+ if req.Request == requestQuit {
+ // Making Quit a specific request means that we could
+ // run Stat() at a caller's behest before using the
+ // same process for Get() or Put(). Maybe later.
+ break
+ }
+
+ // Multiple requests should list the same root, because we
+ // can't un-chroot to chroot to some other location.
+ if previousRequestRoot != "" {
+ // Check that we got the same input value for
+ // where-to-chroot-to.
+ if req.Root != previousRequestRoot {
+ fmt.Fprintf(os.Stderr, "error: can't change location of chroot from %q to %q", previousRequestRoot, req.Root)
+ os.Exit(1)
+ }
+ previousRequestRoot = req.Root
+ } else {
+ // Figure out where to chroot to, if we weren't told.
+ if req.Root == "" {
+ root, err := currentVolumeRoot()
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "error determining root of current volume: %v", err)
+ os.Exit(1)
+ }
+ req.Root = root
+ }
+ // Change to the specified root directory.
+ var err error
+ chrooted, err = chroot(req.Root)
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "error changing to intended-new-root directory %q: %v", req.Root, err)
+ os.Exit(1)
+ }
+ }
+
+ req.preservedRoot = req.Root
+ req.rootPrefix = string(os.PathSeparator)
+ req.preservedDirectory = req.Directory
+ req.preservedGlobs = append([]string{}, req.Globs...)
+ if chrooted {
+ // We'll need to adjust some things now that the root
+ // directory isn't what it was. Make the directory and
+ // globs absolute paths for simplicity's sake.
+ absoluteDirectory := req.Directory
+ if !filepath.IsAbs(req.Directory) {
+ absoluteDirectory = filepath.Join(req.Root, cleanerReldirectory(req.Directory))
+ }
+ relativeDirectory, err := convertToRelSubdirectory(req.preservedRoot, absoluteDirectory)
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "error rewriting %q to be relative to %q: %v", absoluteDirectory, req.preservedRoot, err)
+ os.Exit(1)
+ }
+ req.Directory = filepath.Clean(string(os.PathSeparator) + relativeDirectory)
+ absoluteGlobs := make([]string, 0, len(req.Globs))
+ for i, glob := range req.preservedGlobs {
+ if filepath.IsAbs(glob) {
+ relativeGlob, err := convertToRelSubdirectory(req.preservedRoot, glob)
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "error rewriting %q to be relative to %q: %v", glob, req.preservedRoot, err)
+ os.Exit(1)
+ }
+ absoluteGlobs = append(absoluteGlobs, filepath.Clean(string(os.PathSeparator)+relativeGlob))
+ } else {
+ absoluteGlobs = append(absoluteGlobs, filepath.Join(req.Directory, cleanerReldirectory(req.Globs[i])))
+ }
+ }
+ req.Globs = absoluteGlobs
+ req.rootPrefix = req.Root
+ req.Root = string(os.PathSeparator)
+ } else {
+ // Make the directory and globs absolute paths for
+ // simplicity's sake.
+ if !filepath.IsAbs(req.Directory) {
+ req.Directory = filepath.Join(req.Root, cleanerReldirectory(req.Directory))
+ }
+ absoluteGlobs := make([]string, 0, len(req.Globs))
+ for i, glob := range req.preservedGlobs {
+ if filepath.IsAbs(glob) {
+ absoluteGlobs = append(absoluteGlobs, req.Globs[i])
+ } else {
+ absoluteGlobs = append(absoluteGlobs, filepath.Join(req.Directory, cleanerReldirectory(req.Globs[i])))
+ }
+ }
+ req.Globs = absoluteGlobs
+ }
+ resp, cb, err := copierHandler(bulkReader, bulkWriter, *req)
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "error handling request %#v: %v", *req, err)
+ os.Exit(1)
+ }
+ // Encode the response.
+ if err := encoder.Encode(resp); err != nil {
+ fmt.Fprintf(os.Stderr, "error encoding response %#v: %v", *req, err)
+ os.Exit(1)
+ }
+ // If there's bulk data to transfer, run the callback to either
+ // read or write it.
+ if cb != nil {
+ if err = cb(); err != nil {
+ fmt.Fprintf(os.Stderr, "error during bulk transfer for %#v: %v", *req, err)
+ os.Exit(1)
+ }
+ }
+ }
+}
+
+// copierHandler dispatches a single request to the handler for its type and
+// returns the response, along with an optional callback that performs the
+// associated bulk data transfer.
+func copierHandler(bulkReader io.Reader, bulkWriter io.Writer, req request) (*response, func() error, error) {
+ // NewPatternMatcher splits patterns into components using
+ // os.PathSeparator, implying that it expects OS-specific naming
+ // conventions.
+ excludes := req.Excludes()
+ pm, err := fileutils.NewPatternMatcher(excludes)
+ if err != nil {
+ return nil, nil, errors.Wrapf(err, "error processing excludes list %v", excludes)
+ }
+
+ var idMappings *idtools.IDMappings
+ uidMap, gidMap := req.UIDMap(), req.GIDMap()
+ if len(uidMap) > 0 && len(gidMap) > 0 {
+ idMappings = idtools.NewIDMappingsFromMaps(uidMap, gidMap)
+ }
+
+ switch req.Request {
+ default:
+ return nil, nil, errors.Errorf("not an implemented request type: %q", req.Request)
+ case requestStat:
+ resp := copierHandlerStat(req, pm)
+ return resp, nil, nil
+ case requestGet:
+ return copierHandlerGet(bulkWriter, req, pm, idMappings)
+ case requestPut:
+ return copierHandlerPut(bulkReader, req, idMappings)
+ case requestMkdir:
+ return copierHandlerMkdir(req, idMappings)
+ case requestQuit:
+ return nil, nil, nil
+ }
+}
+
+// pathIsExcluded computes path relative to root, then asks the pattern matcher
+// if the result is excluded. Returns the relative path and the matcher's
+// results.
+func pathIsExcluded(root, path string, pm *fileutils.PatternMatcher) (string, bool, error) {
+ rel, err := convertToRelSubdirectory(root, path)
+ if err != nil {
+ return "", false, errors.Wrapf(err, "copier: error computing path of %q relative to root %q", path, root)
+ }
+ if pm == nil {
+ return rel, false, nil
+ }
+ if rel == "." {
+ // special case
+ return rel, false, nil
+ }
+ // Matches uses filepath.FromSlash() to convert candidates before
+ // checking if they match the patterns it's been given, implying that
+ // it expects Unix-style paths.
+ matches, err := pm.Matches(filepath.ToSlash(rel)) // nolint:staticcheck
+ if err != nil {
+ return rel, false, errors.Wrapf(err, "copier: error checking if %q is excluded", rel)
+ }
+ if matches {
+ return rel, true, nil
+ }
+ return rel, false, nil
+}
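+
+// For example, with root "/r" and a pattern matcher built from the excludes
+// list []string{"*.tmp"} (a hypothetical pattern), pathIsExcluded("/r",
+// "/r/b.tmp", pm) would return ("b.tmp", true, nil).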
+
+// resolvePath resolves symbolic links in paths, treating the specified
+// directory as the root.
+// Resolving the path this way, and using the result, is in no way secure
+// against another process manipulating the content that we're looking at, and
+// it is not expected to be.
+// This helps us approximate chrooted behavior on systems and in test cases
+// where chroot isn't available.
+func resolvePath(root, path string, pm *fileutils.PatternMatcher) (string, error) {
+ rel, err := convertToRelSubdirectory(root, path)
+ if err != nil {
+ return "", errors.Errorf("error making path %q relative to %q", path, root)
+ }
+ workingPath := root
+ followed := 0
+ components := strings.Split(rel, string(os.PathSeparator))
+ excluded := false
+ for len(components) > 0 {
+ // if anything we try to examine is excluded, then resolution has to "break"
+ _, thisExcluded, err := pathIsExcluded(root, filepath.Join(workingPath, components[0]), pm)
+ if err != nil {
+ return "", err
+ }
+ excluded = excluded || thisExcluded
+ if !excluded {
+ if target, err := os.Readlink(filepath.Join(workingPath, components[0])); err == nil {
+ followed++
+ if followed > maxLoopsFollowed {
+ return "", &os.PathError{
+ Op: "open",
+ Path: path,
+ Err: syscall.ELOOP,
+ }
+ }
+ if filepath.IsAbs(target) || looksLikeAbs(target) {
+ // symlink to an absolute path - prepend the
+ // root directory to that absolute path to
+ // replace the current location, and resolve
+ // the remaining components
+ workingPath = root
+ components = append(strings.Split(target, string(os.PathSeparator)), components[1:]...)
+ continue
+ }
+ // symlink to a relative path - add the link target to
+ // the current location to get the next location, and
+ // resolve the remaining components
+ rel, err := convertToRelSubdirectory(root, filepath.Join(workingPath, target))
+ if err != nil {
+ return "", errors.Errorf("error making path %q relative to %q", filepath.Join(workingPath, target), root)
+ }
+ workingPath = root
+ components = append(strings.Split(filepath.Clean(string(os.PathSeparator)+rel), string(os.PathSeparator)), components[1:]...)
+ continue
+ }
+ }
+ // append the current component's name to get the next location
+ workingPath = filepath.Join(workingPath, components[0])
+ if workingPath == filepath.Join(root, "..") {
+ // attempted to go above the root using a relative path .., scope it
+ workingPath = root
+ }
+ // ready to handle the next component
+ components = components[1:]
+ }
+ return workingPath, nil
+}
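+
+// For example, if "/considered/root/link" is a symlink to "/target", then
+// resolvePath("/considered/root", "/considered/root/link", nil) yields
+// "/considered/root/target": absolute link targets are re-anchored under the
+// root instead of being followed out to the host's filesystem.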
+
+func copierHandlerStat(req request, pm *fileutils.PatternMatcher) *response {
+ errorResponse := func(fmtspec string, args ...interface{}) *response {
+ return &response{Error: fmt.Sprintf(fmtspec, args...), Stat: statResponse{}}
+ }
+ if len(req.Globs) == 0 {
+ return errorResponse("copier: stat: expected at least one glob pattern, got none")
+ }
+ var stats []*StatsForGlob
+ for i, glob := range req.Globs {
+ s := StatsForGlob{
+ Glob: req.preservedGlobs[i],
+ }
+ stats = append(stats, &s)
+ // glob this pattern
+ globMatched, err := filepath.Glob(glob)
+ if err != nil {
+ s.Error = fmt.Sprintf("copier: stat: %q while matching glob pattern %q", err.Error(), glob)
+ continue
+ }
+ // collect the matches
+ s.Globbed = make([]string, 0, len(globMatched))
+ s.Results = make(map[string]*StatForItem)
+ for _, globbed := range globMatched {
+ rel, excluded, err := pathIsExcluded(req.Root, globbed, pm)
+ if err != nil {
+ return errorResponse("copier: stat: %v", err)
+ }
+ if excluded {
+ continue
+ }
+ // if the glob was an absolute path, reconstruct the
+ // path that we should hand back for the match
+ var resultName string
+ if filepath.IsAbs(req.preservedGlobs[i]) {
+ resultName = filepath.Join(req.rootPrefix, globbed)
+ } else {
+ relResult := rel
+ if req.Directory != req.Root {
+ relResult, err = convertToRelSubdirectory(req.Directory, globbed)
+ if err != nil {
+ return errorResponse("copier: stat: error making %q relative to %q: %v", globbed, req.Directory, err)
+ }
+ }
+ resultName = relResult
+ }
+ result := StatForItem{Name: resultName}
+ s.Globbed = append(s.Globbed, resultName)
+ s.Results[resultName] = &result
+ // lstat the matched value
+ linfo, err := os.Lstat(globbed)
+ if err != nil {
+ result.Error = err.Error()
+ continue
+ }
+ result.Size = linfo.Size()
+ result.Mode = linfo.Mode()
+ result.ModTime = linfo.ModTime()
+ result.IsDir = linfo.IsDir()
+ result.IsRegular = result.Mode.IsRegular()
+ result.IsSymlink = (linfo.Mode() & os.ModeType) == os.ModeSymlink
+ checkForArchive := req.StatOptions.CheckForArchives
+ if result.IsSymlink {
+ // if the match was a symbolic link, read it
+ immediateTarget, err := os.Readlink(globbed)
+ if err != nil {
+ result.Error = err.Error()
+ continue
+ }
+ // record where it points, both by itself (it
+ // could be a relative link) and in the context
+ // of the chroot
+ result.ImmediateTarget = immediateTarget
+ resolvedTarget, err := resolvePath(req.Root, globbed, pm)
+ if err != nil {
+ return errorResponse("copier: stat: error resolving %q: %v", globbed, err)
+ }
+ // lstat the thing that we point to
+ info, err := os.Lstat(resolvedTarget)
+ if err != nil {
+ result.Error = err.Error()
+ continue
+ }
+ // replace IsArchive/IsDir/IsRegular with info about the target
+ if info.Mode().IsRegular() && req.StatOptions.CheckForArchives {
+ result.IsArchive = isArchivePath(resolvedTarget)
+ checkForArchive = false
+ }
+ result.IsDir = info.IsDir()
+ result.IsRegular = info.Mode().IsRegular()
+ }
+ if result.IsRegular && checkForArchive {
+ // we were asked to check on this, and it
+ // wasn't a symlink, in which case we'd have
+ // already checked what the link points to
+ result.IsArchive = isArchivePath(globbed)
+ }
+ }
+ // no unskipped matches -> error
+ if len(s.Globbed) == 0 {
+ s.Globbed = nil
+ s.Results = nil
+ s.Error = fmt.Sprintf("copier: stat: %q: %v", glob, syscall.ENOENT)
+ }
+ }
+ return &response{Stat: statResponse{Globs: stats}}
+}
+
+func copierHandlerGet(bulkWriter io.Writer, req request, pm *fileutils.PatternMatcher, idMappings *idtools.IDMappings) (*response, func() error, error) {
+ statRequest := req
+ statRequest.Request = requestStat
+ statResponse := copierHandlerStat(statRequest, pm)
+ errorResponse := func(fmtspec string, args ...interface{}) (*response, func() error, error) {
+ return &response{Error: fmt.Sprintf(fmtspec, args...), Stat: statResponse.Stat, Get: getResponse{}}, nil, nil
+ }
+ if statResponse.Error != "" {
+ return errorResponse("%s", statResponse.Error)
+ }
+ if len(req.Globs) == 0 {
+ return errorResponse("copier: get: expected at least one glob pattern, got 0")
+ }
+ // build a queue of items by globbing
+ var queue []string
+ globMatchedCount := 0
+ for _, glob := range req.Globs {
+ globMatched, err := filepath.Glob(glob)
+ if err != nil {
+ return errorResponse("copier: get: glob %q: %v", glob, err)
+ }
+ globMatchedCount += len(globMatched)
+ filtered := make([]string, 0, len(globMatched))
+ for _, globbed := range globMatched {
+ rel, excluded, err := pathIsExcluded(req.Root, globbed, pm)
+ if err != nil {
+ return errorResponse("copier: get: checking if %q is excluded: %v", globbed, err)
+ }
+ if rel == "." || !excluded {
+ filtered = append(filtered, globbed)
+ }
+ }
+ if len(filtered) == 0 {
+ return errorResponse("copier: get: glob %q matched nothing (%d filtered out of %v): %v", glob, len(globMatched), globMatched, syscall.ENOENT)
+ }
+ queue = append(queue, filtered...)
+ }
+ // no matches -> error
+ if len(queue) == 0 {
+ return errorResponse("copier: get: globs %v matched nothing (%d filtered out): %v", req.Globs, globMatchedCount, syscall.ENOENT)
+ }
+ cb := func() error {
+ tw := tar.NewWriter(bulkWriter)
+ defer tw.Close()
+ hardlinkChecker := new(util.HardlinkChecker)
+ itemsCopied := 0
+ for i, item := range queue {
+ // if we're not discarding the names of individual directories, keep track of this one
+ relNamePrefix := ""
+ if req.GetOptions.KeepDirectoryNames {
+ relNamePrefix = filepath.Base(item)
+ }
+ // if the named thing-to-read is a symlink, dereference it
+ info, err := os.Lstat(item)
+ if err != nil {
+ return errors.Wrapf(err, "copier: get: lstat %q", item)
+ }
+ // chase links. if we hit a dead end, we should just fail
+ followedLinks := 0
+ const maxFollowedLinks = 16
+ for info.Mode()&os.ModeType == os.ModeSymlink && followedLinks < maxFollowedLinks {
+ path, err := os.Readlink(item)
+ if err != nil {
+ return errors.Wrapf(err, "copier: get: readlink(%q(%q))", queue[i], item)
+ }
+ if filepath.IsAbs(path) || looksLikeAbs(path) {
+ path = filepath.Join(req.Root, path)
+ } else {
+ path = filepath.Join(filepath.Dir(item), path)
+ }
+ item = path
+ if _, err = convertToRelSubdirectory(req.Root, item); err != nil {
+ return errors.Wrapf(err, "copier: get: computing path of %q(%q) relative to %q", queue[i], item, req.Root)
+ }
+ if info, err = os.Lstat(item); err != nil {
+ return errors.Wrapf(err, "copier: get: lstat %q(%q)", queue[i], item)
+ }
+ followedLinks++
+ }
+ if followedLinks >= maxFollowedLinks {
+ return errors.Wrapf(syscall.ELOOP, "copier: get: resolving symlink %q(%q)", queue[i], item)
+ }
+ // evaluate excludes relative to the root directory
+ if info.Mode().IsDir() {
+ walkfn := func(path string, info os.FileInfo, err error) error {
+ // compute the path of this item
+ // relative to the top-level directory,
+ // for the tar header
+ rel, relErr := convertToRelSubdirectory(item, path)
+ if relErr != nil {
+ return errors.Wrapf(relErr, "copier: get: error computing path of %q relative to top directory %q", path, item)
+ }
+ if err != nil {
+ return errors.Wrapf(err, "copier: get: error reading %q", path)
+ }
+ // prefix the original item's name if we're keeping it
+ if relNamePrefix != "" {
+ rel = filepath.Join(relNamePrefix, rel)
+ }
+ if rel == "" || rel == "." {
+ // skip the "." entry
+ return nil
+ }
+ _, skip, err := pathIsExcluded(req.Root, path, pm)
+ if err != nil {
+ return err
+ }
+ if skip {
+ // don't use filepath.SkipDir
+ // here: a more specific
+ // include-this-anyway pattern
+ // for something under this
+ // directory might also be in
+ // the excludes list
+ return nil
+ }
+ // if it's a symlink, read its target
+ symlinkTarget := ""
+ if info.Mode()&os.ModeType == os.ModeSymlink {
+ target, err := os.Readlink(path)
+ if err != nil {
+ return errors.Wrapf(err, "copier: get: readlink(%q(%q))", rel, path)
+ }
+ symlinkTarget = target
+ }
+ // add the item to the outgoing tar stream
+ return copierHandlerGetOne(info, symlinkTarget, rel, path, req.GetOptions, tw, hardlinkChecker, idMappings)
+ }
+ // walk the directory tree, checking/adding items individually
+ if err := filepath.Walk(item, walkfn); err != nil {
+ return errors.Wrapf(err, "copier: get: %q(%q)", queue[i], item)
+ }
+ itemsCopied++
+ } else {
+ _, skip, err := pathIsExcluded(req.Root, item, pm)
+ if err != nil {
+ return err
+ }
+ if skip {
+ continue
+ }
+ // add the item to the outgoing tar stream. in
+ // cases where this was a symlink that we
+ // dereferenced, be sure to use the name of the
+ // link.
+ if err := copierHandlerGetOne(info, "", filepath.Base(queue[i]), item, req.GetOptions, tw, hardlinkChecker, idMappings); err != nil {
+ return errors.Wrapf(err, "copier: get: %q", queue[i])
+ }
+ itemsCopied++
+ }
+ }
+ if itemsCopied == 0 {
+ return errors.New("copier: get: copied no items")
+ }
+ return nil
+ }
+ return &response{Stat: statResponse.Stat, Get: getResponse{}}, cb, nil
+}
+
+// copierHandlerGetOne writes a single item, described by its FileInfo and the
+// name it should carry in the archive, to the outgoing tar stream, honoring
+// the supplied GetOptions.
+func copierHandlerGetOne(srcfi os.FileInfo, symlinkTarget, name, contentPath string, options GetOptions, tw *tar.Writer, hardlinkChecker *util.HardlinkChecker, idMappings *idtools.IDMappings) error {
+ // build the header using the name provided
+ hdr, err := tar.FileInfoHeader(srcfi, symlinkTarget)
+ if err != nil {
+ return errors.Wrapf(err, "error generating tar header for %s (%s)", contentPath, symlinkTarget)
+ }
+ if name != "" {
+ hdr.Name = filepath.ToSlash(name)
+ }
+ if options.StripSetuidBit {
+ hdr.Mode &^= cISUID
+ }
+ if options.StripSetgidBit {
+ hdr.Mode &^= cISGID
+ }
+ if options.StripStickyBit {
+ hdr.Mode &^= cISVTX
+ }
+ // read extended attributes
+ var xattrs map[string]string
+ if !options.StripXattrs {
+ xattrs, err = Lgetxattrs(contentPath)
+ if err != nil {
+ return errors.Wrapf(err, "error getting extended attributes for %q", contentPath)
+ }
+ }
+ hdr.Xattrs = xattrs // nolint:staticcheck
+ if hdr.Typeflag == tar.TypeReg {
+ // if it's an archive and we're extracting archives, read the
+ // file and spool out its contents in-line. (if we just
+ // inlined the whole file, we'd also be inlining the EOF marker
+ // it contains)
+ if options.ExpandArchives && isArchivePath(contentPath) {
+ f, err := os.Open(contentPath)
+ if err != nil {
+ return errors.Wrapf(err, "error opening %s", contentPath)
+ }
+ defer f.Close()
+ rc, _, err := compression.AutoDecompress(f)
+ if err != nil {
+ return errors.Wrapf(err, "error decompressing %s", contentPath)
+ }
+ defer rc.Close()
+ tr := tar.NewReader(rc)
+ hdr, err := tr.Next()
+ for err == nil {
+ if err = tw.WriteHeader(hdr); err != nil {
+ return errors.Wrapf(err, "error writing tar header from %q to pipe", contentPath)
+ }
+ if hdr.Size != 0 {
+ n, err := io.Copy(tw, tr)
+ if err != nil {
+ return errors.Wrapf(err, "error extracting content from archive %s: %s", contentPath, hdr.Name)
+ }
+ if n != hdr.Size {
+ return errors.Errorf("error extracting contents of archive %s: incorrect length for %q", contentPath, hdr.Name)
+ }
+ tw.Flush()
+ }
+ hdr, err = tr.Next()
+ }
+ if err != io.EOF {
+ return errors.Wrapf(err, "error extracting contents of archive %s", contentPath)
+ }
+ return nil
+ }
+ // if this regular file is hard linked to something else we've
+ // already added, set up to output a TypeLink entry instead of
+ // a TypeReg entry
+ target := hardlinkChecker.Check(srcfi)
+ if target != "" {
+ hdr.Typeflag = tar.TypeLink
+ hdr.Linkname = filepath.ToSlash(target)
+ hdr.Size = 0
+ } else {
+ // note the device/inode pair for this file
+ hardlinkChecker.Add(srcfi, name)
+ }
+ }
+ // map the ownership for the archive
+ if idMappings != nil && !idMappings.Empty() {
+ hostPair := idtools.IDPair{UID: hdr.Uid, GID: hdr.Gid}
+ hdr.Uid, hdr.Gid, err = idMappings.ToContainer(hostPair)
+ if err != nil {
+ return errors.Wrapf(err, "error mapping host filesystem owners %#v to container filesystem owners", hostPair)
+ }
+ }
+ // output the header
+ if err = tw.WriteHeader(hdr); err != nil {
+ return errors.Wrapf(err, "error writing header for %s (%s)", contentPath, hdr.Name)
+ }
+ if hdr.Typeflag == tar.TypeReg {
+ // output the content
+ f, err := os.Open(contentPath)
+ if err != nil {
+ return errors.Wrapf(err, "error opening %s", contentPath)
+ }
+ defer f.Close()
+ n, err := io.Copy(tw, f)
+ if err != nil {
+ return errors.Wrapf(err, "error copying %s", contentPath)
+ }
+ if n != hdr.Size {
+ return errors.Errorf("error copying %s: incorrect size (expected %d bytes, read %d bytes)", contentPath, n, hdr.Size)
+ }
+ tw.Flush()
+ }
+ return nil
+}
+
+func copierHandlerPut(bulkReader io.Reader, req request, idMappings *idtools.IDMappings) (*response, func() error, error) {
+ errorResponse := func(fmtspec string, args ...interface{}) (*response, func() error, error) {
+ return &response{Error: fmt.Sprintf(fmtspec, args...), Put: putResponse{}}, nil, nil
+ }
+ dirUID, dirGID := 0, 0
+ if req.PutOptions.ChownDirs != nil {
+ dirUID, dirGID = req.PutOptions.ChownDirs.UID, req.PutOptions.ChownDirs.GID
+ }
+ dirMode := os.FileMode(0755)
+ if req.PutOptions.ChmodDirs != nil {
+ dirMode = *req.PutOptions.ChmodDirs
+ }
+ var fileUID, fileGID *int
+ if req.PutOptions.ChownFiles != nil {
+ fileUID, fileGID = &req.PutOptions.ChownFiles.UID, &req.PutOptions.ChownFiles.GID
+ }
+ if idMappings != nil && !idMappings.Empty() {
+ containerDirPair := idtools.IDPair{UID: dirUID, GID: dirGID}
+ hostDirPair, err := idMappings.ToHost(containerDirPair)
+ if err != nil {
+ return errorResponse("copier: put: error mapping container filesystem owner %d:%d to host filesystem owners: %v", dirUID, dirGID, err)
+ }
+ dirUID, dirGID = hostDirPair.UID, hostDirPair.GID
+ if req.PutOptions.ChownFiles != nil {
+ containerFilePair := idtools.IDPair{UID: *fileUID, GID: *fileGID}
+ hostFilePair, err := idMappings.ToHost(containerFilePair)
+ if err != nil {
+ return errorResponse("copier: put: error mapping container filesystem owner %d:%d to host filesystem owners: %v", fileUID, fileGID, err)
+ }
+ fileUID, fileGID = &hostFilePair.UID, &hostFilePair.GID
+ }
+ }
+ ensureDirectoryUnderRoot := func(directory string) error {
+ rel, err := convertToRelSubdirectory(req.Root, directory)
+ if err != nil {
+ return errors.Wrapf(err, "%q is not a subdirectory of %q", directory, req.Root)
+ }
+ subdir := ""
+ for _, component := range strings.Split(rel, string(os.PathSeparator)) {
+ subdir = filepath.Join(subdir, component)
+ path := filepath.Join(req.Root, subdir)
+ if err := os.Mkdir(path, 0700); err == nil {
+ if err = lchown(path, dirUID, dirGID); err != nil {
+ return errors.Wrapf(err, "copier: put: error setting owner of %q to %d:%d", path, dirUID, dirGID)
+ }
+ if err = os.Chmod(path, dirMode); err != nil {
+ return errors.Wrapf(err, "copier: put: error setting permissions on %q to 0%o", path, dirMode)
+ }
+ } else {
+ if !os.IsExist(err) {
+ return errors.Wrapf(err, "copier: put: error checking directory %q", path)
+ }
+ }
+ }
+ return nil
+ }
+ createFile := func(path string, tr *tar.Reader) (int64, error) {
+ f, err := os.OpenFile(path, os.O_CREATE|os.O_WRONLY|os.O_TRUNC|os.O_EXCL, 0600)
+ if err != nil && os.IsExist(err) {
+ if err = os.Remove(path); err != nil {
+ return 0, errors.Wrapf(err, "copier: put: error removing file to be overwritten %q", path)
+ }
+ f, err = os.OpenFile(path, os.O_CREATE|os.O_WRONLY|os.O_TRUNC|os.O_EXCL, 0600)
+ }
+ if err != nil {
+ return 0, errors.Wrapf(err, "copier: put: error opening file %q for writing", path)
+ }
+ defer f.Close()
+ n, err := io.Copy(f, tr)
+ if err != nil {
+ return n, errors.Wrapf(err, "copier: put: error writing file %q", path)
+ }
+ return n, nil
+ }
+ targetDirectory, err := resolvePath(req.Root, req.Directory, nil)
+ if err != nil {
+ return errorResponse("copier: put: error resolving %q: %v", req.Directory, err)
+ }
+ info, err := os.Lstat(targetDirectory)
+ if err == nil {
+ if !info.IsDir() {
+ return errorResponse("copier: put: %s (%s): exists but is not a directory", req.Directory, targetDirectory)
+ }
+ } else {
+ if !os.IsNotExist(err) {
+ return errorResponse("copier: put: %s: %v", req.Directory, err)
+ }
+ if err := ensureDirectoryUnderRoot(req.Directory); err != nil {
+ return errorResponse("copier: put: %v", err)
+ }
+ }
+ cb := func() error {
+ type directoryAndTimes struct {
+ directory string
+ atime, mtime time.Time
+ }
+ var directoriesAndTimes []directoryAndTimes
+ defer func() {
+ for i := range directoriesAndTimes {
+ directoryAndTimes := directoriesAndTimes[len(directoriesAndTimes)-i-1]
+ if err := lutimes(false, directoryAndTimes.directory, directoryAndTimes.atime, directoryAndTimes.mtime); err != nil {
+ logrus.Debugf("error setting access and modify timestamps on %q to %s and %s: %v", directoryAndTimes.directory, directoryAndTimes.atime, directoryAndTimes.mtime, err)
+ }
+ }
+ }()
+ tr := tar.NewReader(bulkReader)
+ hdr, err := tr.Next()
+ for err == nil {
+ // figure out who should own this new item
+ if idMappings != nil && !idMappings.Empty() {
+ containerPair := idtools.IDPair{UID: hdr.Uid, GID: hdr.Gid}
+ hostPair, err := idMappings.ToHost(containerPair)
+ if err != nil {
+ return errors.Wrapf(err, "error mapping container filesystem owner 0,0 to host filesystem owners")
+ }
+ hdr.Uid, hdr.Gid = hostPair.UID, hostPair.GID
+ }
+ if hdr.Typeflag == tar.TypeDir {
+ if req.PutOptions.ChownDirs != nil {
+ hdr.Uid, hdr.Gid = dirUID, dirGID
+ }
+ } else {
+ if req.PutOptions.ChownFiles != nil {
+ hdr.Uid, hdr.Gid = *fileUID, *fileGID
+ }
+ }
+ // make sure the parent directory exists
+ path := filepath.Join(targetDirectory, cleanerReldirectory(filepath.FromSlash(hdr.Name)))
+ if err := ensureDirectoryUnderRoot(filepath.Dir(path)); err != nil {
+ return err
+ }
+ // figure out what the permissions should be
+ if hdr.Typeflag == tar.TypeDir {
+ if req.PutOptions.ChmodDirs != nil {
+ hdr.Mode = int64(*req.PutOptions.ChmodDirs)
+ }
+ } else {
+ if req.PutOptions.ChmodFiles != nil {
+ hdr.Mode = int64(*req.PutOptions.ChmodFiles)
+ }
+ }
+ // create the new item
+ devMajor := uint32(hdr.Devmajor)
+ devMinor := uint32(hdr.Devminor)
+ switch hdr.Typeflag {
+ // no type flag for sockets
+ default:
+ return errors.Errorf("unrecognized Typeflag %c", hdr.Typeflag)
+ case tar.TypeReg, tar.TypeRegA:
+ var written int64
+ written, err = createFile(path, tr)
+ if err == nil && written != hdr.Size {
+ return errors.Errorf("copier: put: error creating %q: incorrect length (%d != %d)", path, written, hdr.Size)
+ }
+ case tar.TypeLink:
+ var linkTarget string
+ if linkTarget, err = resolvePath(targetDirectory, filepath.Join(req.Root, filepath.FromSlash(hdr.Linkname)), nil); err != nil {
+ return errors.Errorf("error resolving hardlink target path %q under root %q", hdr.Linkname, req.Root)
+ }
+ if err = os.Link(linkTarget, path); err != nil && os.IsExist(err) {
+ if err = os.Remove(path); err == nil {
+ err = os.Link(linkTarget, path)
+ }
+ }
+ case tar.TypeSymlink:
+ if err = os.Symlink(filepath.FromSlash(hdr.Linkname), filepath.FromSlash(path)); err != nil && os.IsExist(err) {
+ if err = os.Remove(path); err == nil {
+ err = os.Symlink(filepath.FromSlash(hdr.Linkname), filepath.FromSlash(path))
+ }
+ }
+ case tar.TypeChar:
+ if err = mknod(path, chrMode(0600), int(mkdev(devMajor, devMinor))); err != nil && os.IsExist(err) {
+ if err = os.Remove(path); err == nil {
+ err = mknod(path, chrMode(0600), int(mkdev(devMajor, devMinor)))
+ }
+ }
+ case tar.TypeBlock:
+ if err = mknod(path, blkMode(0600), int(mkdev(devMajor, devMinor))); err != nil && os.IsExist(err) {
+ if err = os.Remove(path); err == nil {
+ err = mknod(path, blkMode(0600), int(mkdev(devMajor, devMinor)))
+ }
+ }
+ case tar.TypeDir:
+ if err = os.Mkdir(path, 0700); err != nil && os.IsExist(err) {
+ err = nil
+ }
+ // make a note of the directory's times. we
+ // might create items under it, which will
+ // cause the mtime to change after we correct
+ // it, so we'll need to correct it again later
+ directoriesAndTimes = append(directoriesAndTimes, directoryAndTimes{
+ directory: path,
+ atime: hdr.AccessTime,
+ mtime: hdr.ModTime,
+ })
+ case tar.TypeFifo:
+ if err = mkfifo(path, 0600); err != nil && os.IsExist(err) {
+ if err = os.Remove(path); err == nil {
+ err = mkfifo(path, 0600)
+ }
+ }
+ }
+ // check for errors
+ if err != nil {
+ return errors.Wrapf(err, "copier: put: error creating %q", path)
+ }
+ // restore xattrs
+ if !req.PutOptions.StripXattrs {
+ if err = Lsetxattrs(path, hdr.Xattrs); err != nil { // nolint:staticcheck
+ if !req.PutOptions.IgnoreXattrErrors {
+ return errors.Wrapf(err, "copier: put: error setting extended attributes on %q", path)
+ }
+ }
+ }
+ // set ownership
+ if err = lchown(path, hdr.Uid, hdr.Gid); err != nil {
+ return errors.Wrapf(err, "copier: put: error setting ownership of %q to %d:%d", path, hdr.Uid, hdr.Gid)
+ }
+ // set permissions, except for symlinks, since we don't have lchmod
+ mode := os.FileMode(hdr.Mode) & os.ModePerm
+ if hdr.Typeflag != tar.TypeSymlink {
+ if err = os.Chmod(path, mode); err != nil {
+ return errors.Wrapf(err, "copier: put: error setting permissions on %q to 0%o", path, mode)
+ }
+ }
+ // set other bits that might have been reset by chown()
+ if hdr.Typeflag != tar.TypeSymlink {
+ if hdr.Mode&cISUID == cISUID {
+ mode |= syscall.S_ISUID
+ }
+ if hdr.Mode&cISGID == cISGID {
+ mode |= syscall.S_ISGID
+ }
+ if hdr.Mode&cISVTX == cISVTX {
+ mode |= syscall.S_ISVTX
+ }
+ if err = syscall.Chmod(path, uint32(mode)); err != nil {
+ return errors.Wrapf(err, "error setting additional permissions on %q to 0%o", path, mode)
+ }
+ }
+ // set time
+ if hdr.AccessTime.IsZero() || hdr.AccessTime.Before(hdr.ModTime) {
+ hdr.AccessTime = hdr.ModTime
+ }
+ if err = lutimes(hdr.Typeflag == tar.TypeSymlink, path, hdr.AccessTime, hdr.ModTime); err != nil {
+ return errors.Wrapf(err, "error setting access and modify timestamps on %q to %s and %s", path, hdr.AccessTime, hdr.ModTime)
+ }
+ hdr, err = tr.Next()
+ }
+ if err != io.EOF {
+ return errors.Wrapf(err, "error reading tar stream: expected EOF")
+ }
+ return nil
+ }
+ return &response{Error: "", Put: putResponse{}}, cb, nil
+}
+
+func copierHandlerMkdir(req request, idMappings *idtools.IDMappings) (*response, func() error, error) {
+ errorResponse := func(fmtspec string, args ...interface{}) (*response, func() error, error) {
+ return &response{Error: fmt.Sprintf(fmtspec, args...), Mkdir: mkdirResponse{}}, nil, nil
+ }
+ dirUID, dirGID := 0, 0
+ if req.MkdirOptions.ChownNew != nil {
+ dirUID, dirGID = req.MkdirOptions.ChownNew.UID, req.MkdirOptions.ChownNew.GID
+ }
+ dirMode := os.FileMode(0755)
+ if req.MkdirOptions.ChmodNew != nil {
+ dirMode = *req.MkdirOptions.ChmodNew
+ }
+ if idMappings != nil && !idMappings.Empty() {
+ containerDirPair := idtools.IDPair{UID: dirUID, GID: dirGID}
+ hostDirPair, err := idMappings.ToHost(containerDirPair)
+ if err != nil {
+ return errorResponse("copier: mkdir: error mapping container filesystem owner %d:%d to host filesystem owners: %v", dirUID, dirGID, err)
+ }
+ dirUID, dirGID = hostDirPair.UID, hostDirPair.GID
+ }
+
+ directory, err := resolvePath(req.Root, req.Directory, nil)
+ if err != nil {
+ return errorResponse("copier: mkdir: error resolving %q: %v", req.Directory, err)
+ }
+
+ rel, err := convertToRelSubdirectory(req.Root, directory)
+ if err != nil {
+ return errorResponse("copier: mkdir: error computing path of %q relative to %q: %v", directory, req.Root, err)
+ }
+
+ subdir := ""
+ for _, component := range strings.Split(rel, string(os.PathSeparator)) {
+ subdir = filepath.Join(subdir, component)
+ path := filepath.Join(req.Root, subdir)
+ if err := os.Mkdir(path, 0700); err == nil {
+ if err = chown(path, dirUID, dirGID); err != nil {
+ return errorResponse("copier: mkdir: error setting owner of %q to %d:%d: %v", path, dirUID, dirGID, err)
+ }
+ if err = chmod(path, dirMode); err != nil {
+ return errorResponse("copier: mkdir: error setting permissions on %q to 0%o: %v", path, dirMode)
+ }
+ } else {
+ if !os.IsExist(err) {
+ return errorResponse("copier: mkdir: error checking directory %q: %v", path, err)
+ }
+ }
+ }
+
+ return &response{Error: "", Mkdir: mkdirResponse{}}, nil, nil
+}
diff --git a/vendor/github.com/containers/buildah/copier/syscall_unix.go b/vendor/github.com/containers/buildah/copier/syscall_unix.go
new file mode 100644
index 000000000..55f2f368a
--- /dev/null
+++ b/vendor/github.com/containers/buildah/copier/syscall_unix.go
@@ -0,0 +1,79 @@
+// +build !windows
+
+package copier
+
+import (
+ "fmt"
+ "os"
+ "time"
+
+ "golang.org/x/sys/unix"
+)
+
+var canChroot = true
+
+func chroot(root string) (bool, error) {
+ if canChroot {
+ if err := os.Chdir(root); err != nil {
+ return false, fmt.Errorf("error changing to intended-new-root directory %q: %v", root, err)
+ }
+ if err := unix.Chroot(root); err != nil {
+ return false, fmt.Errorf("error chrooting to directory %q: %v", root, err)
+ }
+ if err := os.Chdir(string(os.PathSeparator)); err != nil {
+ return false, fmt.Errorf("error changing to just-became-root directory %q: %v", root, err)
+ }
+ return true, nil
+ }
+ return false, nil
+}
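+
+// Note that chroot(2) is one-way: the process cannot return to its original
+// root afterwards, which is why copier() services requests with a non-root
+// Root by reexec'ing a subprocess that chroots, rather than chrooting the
+// caller's own process.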
+
+func chrMode(mode os.FileMode) uint32 {
+ return uint32(unix.S_IFCHR | mode)
+}
+
+func blkMode(mode os.FileMode) uint32 {
+ return uint32(unix.S_IFBLK | mode)
+}
+
+func mkdev(major, minor uint32) uint64 {
+ return unix.Mkdev(major, minor)
+}
+
+func mkfifo(path string, mode uint32) error {
+ return unix.Mkfifo(path, mode)
+}
+
+func mknod(path string, mode uint32, dev int) error {
+ return unix.Mknod(path, mode, dev)
+}
+
+func chmod(path string, mode os.FileMode) error {
+ return os.Chmod(path, mode)
+}
+
+func chown(path string, uid, gid int) error {
+ return os.Chown(path, uid, gid)
+}
+
+func lchown(path string, uid, gid int) error {
+ return os.Lchown(path, uid, gid)
+}
+
+func lutimes(isSymlink bool, path string, atime, mtime time.Time) error {
+ if atime.IsZero() || mtime.IsZero() {
+ now := time.Now()
+ if atime.IsZero() {
+ atime = now
+ }
+ if mtime.IsZero() {
+ mtime = now
+ }
+ }
+ return unix.Lutimes(path, []unix.Timeval{unix.NsecToTimeval(atime.UnixNano()), unix.NsecToTimeval(mtime.UnixNano())})
+}
+
+const (
+ testModeMask = int64(os.ModePerm)
+ testIgnoreSymlinkDates = false
+)
diff --git a/vendor/github.com/containers/buildah/copier/syscall_windows.go b/vendor/github.com/containers/buildah/copier/syscall_windows.go
new file mode 100644
index 000000000..be50d473d
--- /dev/null
+++ b/vendor/github.com/containers/buildah/copier/syscall_windows.go
@@ -0,0 +1,83 @@
+// +build windows
+
+package copier
+
+import (
+ "errors"
+ "os"
+ "syscall"
+ "time"
+
+ "golang.org/x/sys/windows"
+)
+
+var canChroot = false
+
+func chroot(path string) (bool, error) {
+ return false, nil
+}
+
+func chrMode(mode os.FileMode) uint32 {
+ return windows.S_IFCHR | uint32(mode)
+}
+
+func blkMode(mode os.FileMode) uint32 {
+ return windows.S_IFBLK | uint32(mode)
+}
+
+func mkdev(major, minor uint32) uint64 {
+ return 0
+}
+
+func mkfifo(path string, mode uint32) error {
+ return syscall.ENOSYS
+}
+
+func mknod(path string, mode uint32, dev int) error {
+ return syscall.ENOSYS
+}
+
+func chmod(path string, mode os.FileMode) error {
+ err := os.Chmod(path, mode)
+ if err != nil && errors.Is(err, syscall.EWINDOWS) {
+ return nil
+ }
+ return err
+}
+
+func chown(path string, uid, gid int) error {
+ err := os.Chown(path, uid, gid)
+ if err != nil && errors.Is(err, syscall.EWINDOWS) {
+ return nil
+ }
+ return err
+}
+
+func lchown(path string, uid, gid int) error {
+ err := os.Lchown(path, uid, gid)
+ if err != nil && errors.Is(err, syscall.EWINDOWS) {
+ return nil
+ }
+ return err
+}
+
+func lutimes(isSymlink bool, path string, atime, mtime time.Time) error {
+ if isSymlink {
+ return nil
+ }
+ if atime.IsZero() || mtime.IsZero() {
+ now := time.Now()
+ if atime.IsZero() {
+ atime = now
+ }
+ if mtime.IsZero() {
+ mtime = now
+ }
+ }
+ return windows.UtimesNano(path, []windows.Timespec{windows.NsecToTimespec(atime.UnixNano()), windows.NsecToTimespec(mtime.UnixNano())})
+}
+
+const (
+ testModeMask = int64(0600)
+ testIgnoreSymlinkDates = true
+)
diff --git a/vendor/github.com/containers/buildah/copier/unwrap_112.go b/vendor/github.com/containers/buildah/copier/unwrap_112.go
new file mode 100644
index 000000000..ebbad08b9
--- /dev/null
+++ b/vendor/github.com/containers/buildah/copier/unwrap_112.go
@@ -0,0 +1,11 @@
+// +build !go113
+
+package copier
+
+import (
+ "github.com/pkg/errors"
+)
+
+func unwrapError(err error) error {
+ return errors.Cause(err)
+}
diff --git a/vendor/github.com/containers/buildah/copier/unwrap_113.go b/vendor/github.com/containers/buildah/copier/unwrap_113.go
new file mode 100644
index 000000000..cd0d0fb68
--- /dev/null
+++ b/vendor/github.com/containers/buildah/copier/unwrap_113.go
@@ -0,0 +1,18 @@
+// +build go113
+
+package copier
+
+import (
+ stderror "errors"
+
+ "github.com/pkg/errors"
+)
+
+func unwrapError(err error) error {
+ e := errors.Cause(err)
+ for e != nil {
+ err = e
+ e = errors.Unwrap(err)
+ }
+ return err
+}
diff --git a/vendor/github.com/containers/buildah/copier/xattrs.go b/vendor/github.com/containers/buildah/copier/xattrs.go
new file mode 100644
index 000000000..71769989c
--- /dev/null
+++ b/vendor/github.com/containers/buildah/copier/xattrs.go
@@ -0,0 +1,92 @@
+// +build linux netbsd freebsd darwin
+
+package copier
+
+import (
+ "path/filepath"
+ "strings"
+ "syscall"
+
+ "github.com/pkg/errors"
+ "golang.org/x/sys/unix"
+)
+
+const (
+ xattrsSupported = true
+)
+
+var (
+ relevantAttributes = []string{"security.capability", "security.ima", "user.*"} // the attributes that we preserve - we discard others
+)
+
+// isRelevantXattr checks if "attribute" matches one of the attribute patterns
+// listed in the "relevantAttributes" list.
+func isRelevantXattr(attribute string) bool {
+ for _, relevant := range relevantAttributes {
+ matched, err := filepath.Match(relevant, attribute)
+ if err != nil || !matched {
+ continue
+ }
+ return true
+ }
+ return false
+}
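+
+// For example, "user.mime_type" matches the "user.*" pattern and is kept,
+// while an attribute like "trusted.overlay.opaque" matches none of the
+// patterns and is discarded.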
+
+// Lgetxattrs returns a map of the relevant extended attributes set on the given file.
+func Lgetxattrs(path string) (map[string]string, error) {
+ maxSize := 64 * 1024 * 1024
+ listSize := 64 * 1024
+ var list []byte
+ for listSize < maxSize {
+ list = make([]byte, listSize)
+ size, err := unix.Llistxattr(path, list)
+ if err != nil {
+ if unwrapError(err) == syscall.ERANGE {
+ listSize *= 2
+ continue
+ }
+ return nil, errors.Wrapf(err, "error listing extended attributes of %q", path)
+ }
+ list = list[:size]
+ break
+ }
+ if listSize >= maxSize {
+ return nil, errors.Errorf("unable to read list of attributes for %q: size would have been too big", path)
+ }
+ m := make(map[string]string)
+ for _, attribute := range strings.Split(string(list), string('\000')) {
+ if isRelevantXattr(attribute) {
+ attributeSize := 64 * 1024
+ var attributeValue []byte
+ for attributeSize < maxSize {
+ attributeValue = make([]byte, attributeSize)
+ size, err := unix.Lgetxattr(path, attribute, attributeValue)
+ if err != nil {
+ if unwrapError(err) == syscall.ERANGE {
+ attributeSize *= 2
+ continue
+ }
+ return nil, errors.Wrapf(err, "error getting value of extended attribute %q on %q", attribute, path)
+ }
+ m[attribute] = string(attributeValue[:size])
+ break
+ }
+ if attributeSize >= maxSize {
+ return nil, errors.Errorf("unable to read attribute %q of %q: size would have been too big", attribute, path)
+ }
+ }
+ }
+ return m, nil
+}
+
+// Lsetxattrs sets the relevant members of the specified extended attributes on the given file.
+func Lsetxattrs(path string, xattrs map[string]string) error {
+ for attribute, value := range xattrs {
+ if isRelevantXattr(attribute) {
+ if err := unix.Lsetxattr(path, attribute, []byte(value), 0); err != nil {
+ return errors.Wrapf(err, "error setting value of extended attribute %q on %q", attribute, path)
+ }
+ }
+ }
+ return nil
+}
diff --git a/vendor/github.com/containers/buildah/copier/xattrs_unsupported.go b/vendor/github.com/containers/buildah/copier/xattrs_unsupported.go
new file mode 100644
index 000000000..750d842f8
--- /dev/null
+++ b/vendor/github.com/containers/buildah/copier/xattrs_unsupported.go
@@ -0,0 +1,15 @@
+// +build !linux,!netbsd,!freebsd,!darwin
+
+package copier
+
+const (
+ xattrsSupported = false
+)
+
+func Lgetxattrs(path string) (map[string]string, error) {
+ return nil, nil
+}
+
+func Lsetxattrs(path string, xattrs map[string]string) error {
+ return nil
+}
diff --git a/vendor/github.com/containers/buildah/digester.go b/vendor/github.com/containers/buildah/digester.go
index ff1bef2f5..870ab8d98 100644
--- a/vendor/github.com/containers/buildah/digester.go
+++ b/vendor/github.com/containers/buildah/digester.go
@@ -6,6 +6,7 @@ import (
"hash"
"io"
"sync"
+ "time"
digest "github.com/opencontainers/go-digest"
"github.com/pkg/errors"
@@ -82,6 +83,10 @@ func (t *tarFilterer) Close() error {
// newTarFilterer passes one or more tar archives through to an io.WriteCloser
// as a single archive, potentially calling filter to modify headers and
// contents as it goes.
+//
+// Note: if "filter" indicates that a given item should be skipped, there is no
+// guarantee that there will not be a subsequent item of type TypeLink, which
+// is a hard link, which points to the skipped item as the link target.
func newTarFilterer(writeCloser io.WriteCloser, filter func(hdr *tar.Header) (skip, replaceContents bool, replacementContents io.Reader)) io.WriteCloser {
pipeReader, pipeWriter := io.Pipe()
tarWriter := tar.NewWriter(writeCloser)
@@ -153,12 +158,20 @@ type tarDigester struct {
tarFilterer io.WriteCloser
}
+// modifyTarHeaderForDigesting zeroes the timestamps in each tar header so
+// that identical content yields the same digest regardless of when it was
+// written.
+func modifyTarHeaderForDigesting(hdr *tar.Header) (skip, replaceContents bool, replacementContents io.Reader) {
+ zeroTime := time.Time{}
+ hdr.ModTime = zeroTime
+ hdr.AccessTime = zeroTime
+ hdr.ChangeTime = zeroTime
+ return false, false, nil
+}
+
func newTarDigester(contentType string) digester {
nested := newSimpleDigester(contentType)
digester := &tarDigester{
isOpen: true,
nested: nested,
- tarFilterer: nested,
+ tarFilterer: newTarFilterer(nested, modifyTarHeaderForDigesting),
}
return digester
}
diff --git a/vendor/github.com/containers/buildah/go.mod b/vendor/github.com/containers/buildah/go.mod
index 9e692546b..fac079e45 100644
--- a/vendor/github.com/containers/buildah/go.mod
+++ b/vendor/github.com/containers/buildah/go.mod
@@ -4,11 +4,10 @@ go 1.12
require (
github.com/containernetworking/cni v0.7.2-0.20190904153231-83439463f784
- github.com/containers/common v0.19.0
- github.com/containers/image/v5 v5.5.1
+ github.com/containers/common v0.21.0
+ github.com/containers/image/v5 v5.5.2
github.com/containers/ocicrypt v1.0.3
- github.com/containers/storage v1.23.0
- github.com/cyphar/filepath-securejoin v0.2.2
+ github.com/containers/storage v1.23.3
github.com/docker/distribution v2.7.1+incompatible
github.com/docker/go-units v0.4.0
github.com/docker/libnetwork v0.8.0-dev.2.0.20190625141545-5a177b73e316
@@ -27,8 +26,7 @@ require (
github.com/opencontainers/selinux v1.6.0
github.com/openshift/imagebuilder v1.1.6
github.com/pkg/errors v0.9.1
- github.com/seccomp/containers-golang v0.6.0
- github.com/seccomp/libseccomp-golang v0.9.1
+ github.com/seccomp/libseccomp-golang v0.9.2-0.20200616122406-847368b35ebf
github.com/sirupsen/logrus v1.6.0
github.com/spf13/cobra v0.0.7
github.com/spf13/pflag v1.0.5
diff --git a/vendor/github.com/containers/buildah/go.sum b/vendor/github.com/containers/buildah/go.sum
index e7d10f739..463f2bdcc 100644
--- a/vendor/github.com/containers/buildah/go.sum
+++ b/vendor/github.com/containers/buildah/go.sum
@@ -52,10 +52,10 @@ github.com/containerd/ttrpc v0.0.0-20190828154514-0e0f228740de/go.mod h1:PvCDdDG
github.com/containerd/typeurl v0.0.0-20180627222232-a93fcdb778cd/go.mod h1:Cm3kwCdlkCfMSHURc+r6fwoGH6/F1hH3S4sg0rLFWPc=
github.com/containernetworking/cni v0.7.2-0.20190904153231-83439463f784 h1:rqUVLD8I859xRgUx/WMC3v7QAFqbLKZbs+0kqYboRJc=
github.com/containernetworking/cni v0.7.2-0.20190904153231-83439463f784/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY=
-github.com/containers/common v0.19.0 h1:nya/Fh51kiyV0cAO31ejoNwvRAeYreymsO820yjfc3Y=
-github.com/containers/common v0.19.0/go.mod h1:+NUHV8V5Kmo260ja9Dxtr8ialrDnK4RNzyeEbSgmLac=
-github.com/containers/image/v5 v5.5.1 h1:h1FCOXH6Ux9/p/E4rndsQOC4yAdRU0msRTfLVeQ7FDQ=
-github.com/containers/image/v5 v5.5.1/go.mod h1:4PyNYR0nwlGq/ybVJD9hWlhmIsNra4Q8uOQX2s6E2uM=
+github.com/containers/common v0.21.0 h1:v2U9MrGw0vMgefQf0/uJYBsSnengxLbSORYqhCVEBs0=
+github.com/containers/common v0.21.0/go.mod h1:8w8SVwc+P2p1MOnRMbSKNWXt1Iwd2bKFu2LLZx55DTM=
+github.com/containers/image/v5 v5.5.2 h1:fv7FArz0zUnjH0W0l8t90CqWFlFcQrPP6Pug+9dUtVI=
+github.com/containers/image/v5 v5.5.2/go.mod h1:4PyNYR0nwlGq/ybVJD9hWlhmIsNra4Q8uOQX2s6E2uM=
github.com/containers/libtrust v0.0.0-20190913040956-14b96171aa3b h1:Q8ePgVfHDplZ7U33NwHZkrVELsZP5fYj9pM5WBZB2GE=
github.com/containers/libtrust v0.0.0-20190913040956-14b96171aa3b/go.mod h1:9rfv8iPl1ZP7aqh9YA68wnZv2NUDbXdcdPHVz0pFbPY=
github.com/containers/ocicrypt v1.0.2 h1:Q0/IPs8ohfbXNxEfyJ2pFVmvJu5BhqJUAmc6ES9NKbo=
@@ -64,8 +64,8 @@ github.com/containers/ocicrypt v1.0.3 h1:vYgl+RZ9Q3DPMuTfxmN+qp0X2Bj52uuY2vnt6Gz
github.com/containers/ocicrypt v1.0.3/go.mod h1:CUBa+8MRNL/VkpxYIpaMtgn1WgXGyvPQj8jcy0EVG6g=
github.com/containers/storage v1.20.2 h1:tw/uKRPDnmVrluIzer3dawTFG/bTJLP8IEUyHFhltYk=
github.com/containers/storage v1.20.2/go.mod h1:oOB9Ie8OVPojvoaKWEGSEtHbXUAs+tSyr7RO7ZGteMc=
-github.com/containers/storage v1.23.0 h1:gYyNkBiihC2FvGiHOjOjpnfojYwgxpLVooTUlmD6pxs=
-github.com/containers/storage v1.23.0/go.mod h1:I1EIAA7B4OwWRSA0b4yq2AW1wjvvfcY0zLWQuwTa4zw=
+github.com/containers/storage v1.23.3 h1:6ZeQi+xKBXrbUXSSZvSs8HuKoNCPfRkXR4f+8TkiMsI=
+github.com/containers/storage v1.23.3/go.mod h1:0azTMiuBhArp/VUmH1o4DJAGaaH+qLtEu17pJ/iKJCg=
github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
@@ -185,8 +185,8 @@ github.com/klauspost/compress v1.10.7 h1:7rix8v8GpI3ZBb0nSozFRgbtXKv+hOe+qfEpZqy
github.com/klauspost/compress v1.10.7/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
github.com/klauspost/compress v1.10.8 h1:eLeJ3dr/Y9+XRfJT4l+8ZjmtB5RPJhucH2HeCV5+IZY=
github.com/klauspost/compress v1.10.8/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
-github.com/klauspost/compress v1.10.10 h1:a/y8CglcM7gLGYmlbP/stPE5sR3hbhFRUjCBfd/0B3I=
-github.com/klauspost/compress v1.10.10/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
+github.com/klauspost/compress v1.10.11 h1:K9z59aO18Aywg2b/WSgBaUX99mHy2BES18Cr5lBKZHk=
+github.com/klauspost/compress v1.10.11/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
github.com/klauspost/pgzip v1.2.4 h1:TQ7CNpYKovDOmqzRHKxJh0BeaBI7UdQZYc6p7pMQh1A=
github.com/klauspost/pgzip v1.2.4/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk=
@@ -304,10 +304,10 @@ github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40T
github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
github.com/russross/blackfriday/v2 v2.0.1 h1:lPqVAte+HuHNfhJ/0LC98ESWRz8afy9tM/0RK8m9o+Q=
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
-github.com/seccomp/containers-golang v0.6.0 h1:VWPMMIDr8pAtNjCX0WvLEEK9EQi5lAm4HtJbDtAtFvQ=
-github.com/seccomp/containers-golang v0.6.0/go.mod h1:Dd9mONHvW4YdbSzdm23yf2CFw0iqvqLhO0mEFvPIvm4=
github.com/seccomp/libseccomp-golang v0.9.1 h1:NJjM5DNFOs0s3kYE1WUOr6G8V97sdt46rlXTMfXGWBo=
github.com/seccomp/libseccomp-golang v0.9.1/go.mod h1:GbW5+tmTXfcxTToHLXlScSlAvWlF4P2Ca7zGrPiEpWo=
+github.com/seccomp/libseccomp-golang v0.9.2-0.20200616122406-847368b35ebf h1:b0+ZBD3rohnkQ4q5duD1+RyTXTg9yk+qTOPMSQtapO0=
+github.com/seccomp/libseccomp-golang v0.9.2-0.20200616122406-847368b35ebf/go.mod h1:JA8cRccbGaA1s33RQf7Y1+q9gHmZX1yB/z9WDN1C6fg=
github.com/shurcooL/sanitized_anchor_name v1.0.0 h1:PdmoCO6wvbs+7yrJyMORt4/BmY5IYyJwS/kOiWx8mHo=
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4=
@@ -435,7 +435,6 @@ golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20200327173247-9dae0f8f5775/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200519105757-fe76b779f299 h1:DYfZAGf2WMFjMxbgTjaC+2HC7NkNAQs+6Q8b9WEB/F4=
golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200720211630-cb9d2d5c5666/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200728102440-3e129f6d46b1 h1:sIky/MyNRSHTrdxfsiUSS4WIAMvInbeXljJz+jDjeYE=
golang.org/x/sys v0.0.0-20200728102440-3e129f6d46b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
diff --git a/vendor/github.com/containers/buildah/image.go b/vendor/github.com/containers/buildah/image.go
index 8ca94924a..b2c95fecd 100644
--- a/vendor/github.com/containers/buildah/image.go
+++ b/vendor/github.com/containers/buildah/image.go
@@ -13,6 +13,7 @@ import (
"strings"
"time"
+ "github.com/containers/buildah/copier"
"github.com/containers/buildah/docker"
"github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/image"
@@ -21,6 +22,7 @@ import (
"github.com/containers/image/v5/types"
"github.com/containers/storage"
"github.com/containers/storage/pkg/archive"
+ "github.com/containers/storage/pkg/idtools"
"github.com/containers/storage/pkg/ioutils"
digest "github.com/opencontainers/go-digest"
specs "github.com/opencontainers/image-spec/specs-go"
@@ -50,7 +52,7 @@ type containerImageRef struct {
layerID string
oconfig []byte
dconfig []byte
- created time.Time
+ created *time.Time
createdBy string
historyComment string
annotations map[string]string
@@ -58,7 +60,7 @@ type containerImageRef struct {
exporting bool
squash bool
emptyLayer bool
- tarPath func(path string) (io.ReadCloser, error)
+ idMappingOptions *IDMappingOptions
parent string
blobDirectory string
preEmptyLayers []v1.History
@@ -142,16 +144,25 @@ func computeLayerMIMEType(what string, layerCompression archive.Compression) (om
// Extract the container's whole filesystem as if it were a single layer.
func (i *containerImageRef) extractRootfs() (io.ReadCloser, error) {
+ var uidMap, gidMap []idtools.IDMap
mountPoint, err := i.store.Mount(i.containerID, i.mountLabel)
if err != nil {
return nil, errors.Wrapf(err, "error mounting container %q", i.containerID)
}
- rc, err := i.tarPath(mountPoint)
- if err != nil {
- return nil, errors.Wrapf(err, "error extracting rootfs from container %q", i.containerID)
- }
- return ioutils.NewReadCloserWrapper(rc, func() error {
- if err = rc.Close(); err != nil {
+ pipeReader, pipeWriter := io.Pipe()
+ go func() {
+ if i.idMappingOptions != nil {
+ uidMap, gidMap = convertRuntimeIDMaps(i.idMappingOptions.UIDMap, i.idMappingOptions.GIDMap)
+ }
+ copierOptions := copier.GetOptions{
+ UIDMap: uidMap,
+ GIDMap: gidMap,
+ }
+ err = copier.Get(mountPoint, mountPoint, copierOptions, []string{"."}, pipeWriter)
+ pipeWriter.Close()
+ }()
+ return ioutils.NewReadCloserWrapper(pipeReader, func() error {
+ if err = pipeReader.Close(); err != nil {
err = errors.Wrapf(err, "error closing tar archive of container %q", i.containerID)
}
if _, err2 := i.store.Unmount(i.containerID, false); err == nil {
@@ -167,7 +178,10 @@ func (i *containerImageRef) extractRootfs() (io.ReadCloser, error) {
// Build fresh copies of the container configuration structures so that we can edit them
// without making unintended changes to the original Builder.
func (i *containerImageRef) createConfigsAndManifests() (v1.Image, v1.Manifest, docker.V2Image, docker.V2S2Manifest, error) {
- created := i.created
+ created := time.Now().UTC()
+ if i.created != nil {
+ created = *i.created
+ }
// Build an empty image, and then decode over it.
oimage := v1.Image{}
@@ -285,7 +299,6 @@ func (i *containerImageRef) NewImageSource(ctx context.Context, sc *types.System
if err != nil {
return nil, err
}
- omitTimestamp := i.created.Equal(time.Unix(0, 0))
// Extract each layer and compute its digests, both compressed (if requested) and uncompressed.
for _, layerID := range layers {
@@ -375,9 +388,9 @@ func (i *containerImageRef) NewImageSource(ctx context.Context, sc *types.System
return nil, errors.Wrapf(err, "error compressing %s", what)
}
writer := io.MultiWriter(writeCloser, srcHasher.Hash())
- // Zero out timestamps in the layer, if we're doing that for
+ // Use specified timestamps in the layer, if we're doing that for
// history entries.
- if omitTimestamp {
+ if i.created != nil {
nestedWriteCloser := ioutils.NewWriteCloserWrapper(writer, writeCloser.Close)
writeCloser = newTarFilterer(nestedWriteCloser, func(hdr *tar.Header) (bool, bool, io.Reader) {
// Changing a zeroed field to a non-zero field
@@ -388,13 +401,13 @@ func (i *containerImageRef) NewImageSource(ctx context.Context, sc *types.System
// changing the length) of the header that we
// write.
if !hdr.ModTime.IsZero() {
- hdr.ModTime = i.created
+ hdr.ModTime = *i.created
}
if !hdr.AccessTime.IsZero() {
- hdr.AccessTime = i.created
+ hdr.AccessTime = *i.created
}
if !hdr.ChangeTime.IsZero() {
- hdr.ChangeTime = i.created
+ hdr.ChangeTime = *i.created
}
return false, false, nil
})
@@ -414,7 +427,7 @@ func (i *containerImageRef) NewImageSource(ctx context.Context, sc *types.System
} else {
size = counter.Count
}
- logrus.Debugf("%s size is %d bytes", what, size)
+ logrus.Debugf("%s size is %d bytes, uncompressed digest %s, possibly-compressed digest %s", what, size, srcHasher.Digest().String(), destHasher.Digest().String())
// Rename the layer so that we can more easily find it by digest later.
finalBlobName := filepath.Join(path, destHasher.Digest().String())
if err = os.Rename(filepath.Join(path, "layer"), finalBlobName); err != nil {
@@ -469,8 +482,12 @@ func (i *containerImageRef) NewImageSource(ctx context.Context, sc *types.System
}
}
appendHistory(i.preEmptyLayers)
+ created := time.Now().UTC()
+ if i.created != nil {
+ created = (*i.created).UTC()
+ }
onews := v1.History{
- Created: &i.created,
+ Created: &created,
CreatedBy: i.createdBy,
Author: oimage.Author,
Comment: i.historyComment,
@@ -478,7 +495,7 @@ func (i *containerImageRef) NewImageSource(ctx context.Context, sc *types.System
}
oimage.History = append(oimage.History, onews)
dnews := docker.V2S2History{
- Created: i.created,
+ Created: created,
CreatedBy: i.createdBy,
Author: dimage.Author,
Comment: i.historyComment,
@@ -693,9 +710,10 @@ func (b *Builder) makeImageRef(options CommitOptions, exporting bool) (types.Ima
if err != nil {
return nil, errors.Wrapf(err, "error encoding docker-format image configuration %#v", b.Docker)
}
- created := time.Now().UTC()
+ var created *time.Time
if options.HistoryTimestamp != nil {
- created = options.HistoryTimestamp.UTC()
+ historyTimestampUTC := options.HistoryTimestamp.UTC()
+ created = &historyTimestampUTC
}
createdBy := b.CreatedBy()
if createdBy == "" {
@@ -705,10 +723,6 @@ func (b *Builder) makeImageRef(options CommitOptions, exporting bool) (types.Ima
}
}
- if options.OmitTimestamp {
- created = time.Unix(0, 0).UTC()
- }
-
parent := ""
if b.FromImageID != "" {
parentDigest := digest.NewDigestFromEncoded(digest.Canonical, b.FromImageID)
@@ -735,12 +749,11 @@ func (b *Builder) makeImageRef(options CommitOptions, exporting bool) (types.Ima
exporting: exporting,
squash: options.Squash,
emptyLayer: options.EmptyLayer && !options.Squash,
- tarPath: b.tarPath(&b.IDMappingOptions),
+ idMappingOptions: &b.IDMappingOptions,
parent: parent,
blobDirectory: options.BlobDirectory,
preEmptyLayers: b.PrependedEmptyLayers,
postEmptyLayers: b.AppendedEmptyLayers,
}
-
return ref, nil
}
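
extractRootfs now streams the whole filesystem through an io.Pipe, with copier.Get producing tar data on a goroutine while the caller drains the read end. A stripped-down sketch of that producer/consumer shape; the payload is stand-in bytes rather than a real rootfs:

package main

import (
	"fmt"
	"io"
	"os"
)

func main() {
	pipeReader, pipeWriter := io.Pipe()
	go func() {
		// Producer: in buildah this is copier.Get() writing a tar stream.
		_, err := pipeWriter.Write([]byte("tar bytes would go here\n"))
		// CloseWithError(nil) closes normally; a non-nil error is
		// surfaced to the reader instead of a bare EOF.
		pipeWriter.CloseWithError(err)
	}()
	// Consumer: reads until the producer closes its end of the pipe.
	if _, err := io.Copy(os.Stdout, pipeReader); err != nil {
		fmt.Fprintln(os.Stderr, "read:", err)
	}
}
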
diff --git a/vendor/github.com/containers/buildah/imagebuildah/build.go b/vendor/github.com/containers/buildah/imagebuildah/build.go
index 185c93ad3..a8ada90d1 100644
--- a/vendor/github.com/containers/buildah/imagebuildah/build.go
+++ b/vendor/github.com/containers/buildah/imagebuildah/build.go
@@ -168,9 +168,9 @@ type BuildOptions struct {
SignBy string
// Architecture specifies the target architecture of the image to be built.
Architecture string
- // OmitTimestamp forces epoch 0 as created timestamp to allow for
- // deterministic, content-addressable builds.
- OmitTimestamp bool
+ // Timestamp sets the created timestamp to the specified time, allowing
+ // for deterministic, content-addressable builds.
+ Timestamp *time.Time
	// OS specifies the operating system of the image to be built.
OS string
// MaxPullPushRetries is the maximum number of attempts we'll make to pull or push any one
@@ -183,6 +183,8 @@ type BuildOptions struct {
OciDecryptConfig *encconfig.DecryptConfig
// Jobs is the number of stages to run in parallel. If not specified it defaults to 1.
Jobs *int
+ // LogRusage logs resource usage for each step.
+ LogRusage bool
}
// BuildDockerfiles parses a set of one or more Dockerfiles (which may be
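
The new Timestamp field is a pointer so that "unset" and "set to epoch 0" stay distinguishable: nil falls back to time.Now(), while a non-nil value pins the created timestamps (epoch 0 reproduces the old OmitTimestamp behavior). A small sketch of populating it from an epoch-seconds value; flagSet and epochSeconds are hypothetical names for CLI state:

package main

import (
	"fmt"
	"time"
)

// timestampOption converts an optional epoch-seconds flag into the
// *time.Time shape that BuildOptions.Timestamp expects.
func timestampOption(flagSet bool, epochSeconds int64) *time.Time {
	if !flagSet {
		return nil // leave Timestamp unset: buildah uses time.Now()
	}
	t := time.Unix(epochSeconds, 0).UTC()
	return &t
}

func main() {
	fmt.Println(timestampOption(true, 0))  // 1970-01-01 00:00:00 +0000 UTC
	fmt.Println(timestampOption(false, 0)) // <nil>
}
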
diff --git a/vendor/github.com/containers/buildah/imagebuildah/executor.go b/vendor/github.com/containers/buildah/imagebuildah/executor.go
index f3ef584e6..b0ec1cda0 100644
--- a/vendor/github.com/containers/buildah/imagebuildah/executor.go
+++ b/vendor/github.com/containers/buildah/imagebuildah/executor.go
@@ -24,6 +24,7 @@ import (
encconfig "github.com/containers/ocicrypt/config"
"github.com/containers/storage"
"github.com/containers/storage/pkg/archive"
+ digest "github.com/opencontainers/go-digest"
v1 "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/opencontainers/runc/libcontainer/configs"
"github.com/openshift/imagebuilder"
@@ -100,7 +101,7 @@ type Executor struct {
devices []configs.Device
signBy string
architecture string
- omitTimestamp bool
+ timestamp *time.Time
os string
maxPullPushRetries int
retryPullPushDelay time.Duration
@@ -110,6 +111,7 @@ type Executor struct {
stagesLock sync.Mutex
stagesSemaphore *semaphore.Weighted
jobs int
+ logRusage bool
}
// NewExecutor creates a new instance of the imagebuilder.Executor interface.
@@ -152,6 +154,11 @@ func NewExecutor(store storage.Store, options BuildOptions, mainNode *parser.Nod
jobs = *options.Jobs
}
+ writer := options.ReportWriter
+ if options.Quiet {
+ writer = ioutil.Discard
+ }
+
exec := Executor{
stages: make(map[string]*StageExecutor),
store: store,
@@ -174,7 +181,7 @@ func NewExecutor(store storage.Store, options BuildOptions, mainNode *parser.Nod
in: options.In,
out: options.Out,
err: options.Err,
- reportWriter: options.ReportWriter,
+ reportWriter: writer,
isolation: options.Isolation,
namespaceOptions: options.NamespaceOptions,
configureNetwork: options.ConfigureNetwork,
@@ -201,13 +208,14 @@ func NewExecutor(store storage.Store, options BuildOptions, mainNode *parser.Nod
devices: devices,
signBy: options.SignBy,
architecture: options.Architecture,
- omitTimestamp: options.OmitTimestamp,
+ timestamp: options.Timestamp,
os: options.OS,
maxPullPushRetries: options.MaxPullPushRetries,
retryPullPushDelay: options.PullPushRetryDelay,
ociDecryptConfig: options.OciDecryptConfig,
terminatedStage: make(map[string]struct{}),
jobs: jobs,
+ logRusage: options.LogRusage,
}
if exec.err == nil {
exec.err = os.Stderr
@@ -328,22 +336,22 @@ func (b *Executor) waitForStage(ctx context.Context, name string, stages imagebu
}
}
-// getImageHistory returns the history of imageID.
-func (b *Executor) getImageHistory(ctx context.Context, imageID string) ([]v1.History, error) {
+// getImageHistoryAndDiffIDs returns the history and diff IDs list of imageID.
+func (b *Executor) getImageHistoryAndDiffIDs(ctx context.Context, imageID string) ([]v1.History, []digest.Digest, error) {
imageRef, err := is.Transport.ParseStoreReference(b.store, "@"+imageID)
if err != nil {
- return nil, errors.Wrapf(err, "error getting image reference %q", imageID)
+ return nil, nil, errors.Wrapf(err, "error getting image reference %q", imageID)
}
ref, err := imageRef.NewImage(ctx, nil)
if err != nil {
- return nil, errors.Wrapf(err, "error creating new image from reference to image %q", imageID)
+ return nil, nil, errors.Wrapf(err, "error creating new image from reference to image %q", imageID)
}
defer ref.Close()
oci, err := ref.OCIConfig(ctx)
if err != nil {
- return nil, errors.Wrapf(err, "error getting possibly-converted OCI config of image %q", imageID)
+ return nil, nil, errors.Wrapf(err, "error getting possibly-converted OCI config of image %q", imageID)
}
- return oci.History, nil
+ return oci.History, oci.RootFS.DiffIDs, nil
}
func (b *Executor) buildStage(ctx context.Context, cleanupStages map[int]*StageExecutor, stages imagebuilder.Stages, stageIndex int) (imageID string, ref reference.Canonical, err error) {
diff --git a/vendor/github.com/containers/buildah/imagebuildah/stage_executor.go b/vendor/github.com/containers/buildah/imagebuildah/stage_executor.go
index f9cf2312a..0b1db01a3 100644
--- a/vendor/github.com/containers/buildah/imagebuildah/stage_executor.go
+++ b/vendor/github.com/containers/buildah/imagebuildah/stage_executor.go
@@ -12,8 +12,9 @@ import (
"time"
"github.com/containers/buildah"
+ "github.com/containers/buildah/copier"
buildahdocker "github.com/containers/buildah/docker"
- "github.com/containers/buildah/pkg/chrootuser"
+ "github.com/containers/buildah/pkg/rusage"
"github.com/containers/buildah/util"
cp "github.com/containers/image/v5/copy"
"github.com/containers/image/v5/docker/reference"
@@ -23,8 +24,8 @@ import (
"github.com/containers/image/v5/types"
"github.com/containers/storage"
"github.com/containers/storage/pkg/archive"
- securejoin "github.com/cyphar/filepath-securejoin"
docker "github.com/fsouza/go-dockerclient"
+ digest "github.com/opencontainers/go-digest"
v1 "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/openshift/imagebuilder"
"github.com/openshift/imagebuilder/dockerfile/parser"
@@ -55,7 +56,6 @@ type StageExecutor struct {
volumeCache map[string]string
volumeCacheInfo map[string]os.FileInfo
mountPoint string
- copyFrom string // Used to keep track of the --from flag from COPY and ADD
output string
containerIDs []string
stage *imagebuilder.Stage
@@ -258,166 +258,11 @@ func (s *StageExecutor) volumeCacheRestore() error {
return nil
}
-// digestSpecifiedContent digests any content that this next instruction would add to
-// the image, returning the digester if there is any, or nil otherwise. We
-// don't care about the details of where in the filesystem the content actually
-// goes, because we're not actually going to add it here, so this is less
-// involved than Copy().
-func (s *StageExecutor) digestSpecifiedContent(ctx context.Context, node *parser.Node, argValues []string, envValues []string) (string, error) {
- // No instruction: done.
- if node == nil {
- return "", nil
- }
-
- // Not adding content: done.
- switch strings.ToUpper(node.Value) {
- default:
- return "", nil
- case "ADD", "COPY":
- }
-
- // Pull out everything except the first node (the instruction) and the
- // last node (the destination).
- var srcs []string
- destination := node
- for destination.Next != nil {
- destination = destination.Next
- if destination.Next != nil {
- srcs = append(srcs, destination.Value)
- }
- }
-
- var sources []string
- var idMappingOptions *buildah.IDMappingOptions
- contextDir := s.executor.contextDir
- for _, flag := range node.Flags {
- if strings.HasPrefix(flag, "--from=") {
- // Flag says to read the content from another
- // container. Update the ID mappings and
- // all-content-comes-from-below-this-directory value.
- from := strings.TrimPrefix(flag, "--from=")
-
- // If from has an argument within it, resolve it to its
- // value. Otherwise just return the value found.
- var fromErr error
- from, fromErr = imagebuilder.ProcessWord(from, s.stage.Builder.Arguments())
- if fromErr != nil {
- return "", errors.Wrapf(fromErr, "unable to resolve argument %q", from)
- }
- if isStage, err := s.executor.waitForStage(ctx, from, s.stages[:s.index]); isStage && err != nil {
- return "", err
- }
- if other, ok := s.executor.stages[from]; ok && other.index < s.index {
- contextDir = other.mountPoint
- idMappingOptions = &other.builder.IDMappingOptions
- } else if builder, ok := s.executor.containerMap[from]; ok {
- contextDir = builder.MountPoint
- idMappingOptions = &builder.IDMappingOptions
- } else {
- return "", errors.Errorf("the stage %q has not been built", from)
- }
- }
- }
-
- varValues := append(argValues, envValues...)
- for _, src := range srcs {
- // If src has an argument within it, resolve it to its
- // value. Otherwise just return the value found.
- name, err := imagebuilder.ProcessWord(src, varValues)
- if err != nil {
- return "", errors.Wrapf(err, "unable to resolve source %q", src)
- }
- src = name
- if strings.HasPrefix(src, "http://") || strings.HasPrefix(src, "https://") {
- // Source is a URL. TODO: cache this content
- // somewhere, so that we can avoid pulling it down
- // again if we end up needing to drop it into the
- // filesystem.
- sources = append(sources, src)
- } else {
- // Source is not a URL, so it's a location relative to
- // the all-content-comes-from-below-this-directory
- // directory. Also raise an error if the src escapes
- // the context directory.
- contextSrc, err := securejoin.SecureJoin(contextDir, src)
- if err == nil && strings.HasPrefix(src, "../") {
- err = errors.New("escaping context directory error")
- }
- if err != nil {
- return "", errors.Wrapf(err, "forbidden path for %q, it is outside of the build context %q", src, contextDir)
- }
- sources = append(sources, contextSrc)
- }
- }
- // If the all-content-comes-from-below-this-directory is the build
- // context, read its .dockerignore.
- var excludes []string
- if contextDir == s.executor.contextDir {
- var err error
- if excludes, err = imagebuilder.ParseDockerignore(contextDir); err != nil {
- return "", errors.Wrapf(err, "error parsing .dockerignore in %s", contextDir)
- }
- }
- // Restart the digester and have it do a dry-run copy to compute the
- // digest information.
- options := buildah.AddAndCopyOptions{
- Excludes: excludes,
- ContextDir: contextDir,
- IDMappingOptions: idMappingOptions,
- DryRun: true,
- }
- s.builder.ContentDigester.Restart()
- download := strings.ToUpper(node.Value) == "ADD"
-
- // If destination.Value has an argument within it, resolve it to its
- // value. Otherwise just return the value found.
- destValue, destErr := imagebuilder.ProcessWord(destination.Value, varValues)
- if destErr != nil {
- return "", errors.Wrapf(destErr, "unable to resolve destination %q", destination.Value)
- }
- err := s.builder.Add(destValue, download, options, sources...)
- if err != nil {
- return "", errors.Wrapf(err, "error dry-running %q", node.Original)
- }
- // Return the formatted version of the digester's result.
- contentDigest := ""
- prefix, digest := s.builder.ContentDigester.Digest()
- if prefix != "" {
- prefix += ":"
- }
- if digest.Validate() == nil {
- contentDigest = prefix + digest.Encoded()
- }
- return contentDigest, nil
-}
-
// Copy copies data into the working tree. The "Download" field is how
// imagebuilder tells us the instruction was "ADD" and not "COPY".
func (s *StageExecutor) Copy(excludes []string, copies ...imagebuilder.Copy) error {
s.builder.ContentDigester.Restart()
for _, copy := range copies {
- // Check the file and see if part of it is a symlink.
- // Convert it to the target if so. To be ultrasafe
- // do the same for the mountpoint.
- hadFinalPathSeparator := len(copy.Dest) > 0 && copy.Dest[len(copy.Dest)-1] == os.PathSeparator
- secureMountPoint, err := securejoin.SecureJoin("", s.mountPoint)
- if err != nil {
- return errors.Wrapf(err, "error resolving symlinks for copy destination %s", copy.Dest)
- }
- finalPath, err := securejoin.SecureJoin(secureMountPoint, copy.Dest)
- if err != nil {
- return errors.Wrapf(err, "error resolving symlinks for copy destination %s", copy.Dest)
- }
- if !strings.HasPrefix(finalPath, secureMountPoint) {
- return errors.Wrapf(err, "error resolving copy destination %s", copy.Dest)
- }
- copy.Dest = strings.TrimPrefix(finalPath, secureMountPoint)
- if len(copy.Dest) == 0 || copy.Dest[len(copy.Dest)-1] != os.PathSeparator {
- if hadFinalPathSeparator {
- copy.Dest += string(os.PathSeparator)
- }
- }
-
if copy.Download {
logrus.Debugf("ADD %#v, %#v", excludes, copy)
} else {
@@ -432,12 +277,21 @@ func (s *StageExecutor) Copy(excludes []string, copies ...imagebuilder.Copy) err
// all-content-comes-from-below-this-directory value.
var idMappingOptions *buildah.IDMappingOptions
var copyExcludes []string
+ stripSetuid := false
+ stripSetgid := false
+ preserveOwnership := false
contextDir := s.executor.contextDir
if len(copy.From) > 0 {
- if isStage, err := s.executor.waitForStage(s.ctx, copy.From, s.stages[:s.index]); isStage && err != nil {
+ // If from has an argument within it, resolve it to its
+ // value. Otherwise just return the value found.
+ from, fromErr := imagebuilder.ProcessWord(copy.From, s.stage.Builder.Arguments())
+ if fromErr != nil {
+ return errors.Wrapf(fromErr, "unable to resolve argument %q", copy.From)
+ }
+ if isStage, err := s.executor.waitForStage(s.ctx, from, s.stages[:s.index]); isStage && err != nil {
return err
}
- if other, ok := s.executor.stages[copy.From]; ok && other.index < s.index {
+ if other, ok := s.executor.stages[from]; ok && other.index < s.index {
contextDir = other.mountPoint
idMappingOptions = &other.builder.IDMappingOptions
} else if builder, ok := s.executor.containerMap[copy.From]; ok {
@@ -446,9 +300,12 @@ func (s *StageExecutor) Copy(excludes []string, copies ...imagebuilder.Copy) err
} else {
return errors.Errorf("the stage %q has not been built", copy.From)
}
+ preserveOwnership = true
copyExcludes = excludes
} else {
copyExcludes = append(s.executor.excludes, excludes...)
+ stripSetuid = true // did this change between 18.06 and 19.03?
+ stripSetgid = true // did this change between 18.06 and 19.03?
}
for _, src := range copy.Src {
if strings.HasPrefix(src, "http://") || strings.HasPrefix(src, "https://") {
@@ -460,53 +317,20 @@ func (s *StageExecutor) Copy(excludes []string, copies ...imagebuilder.Copy) err
return errors.Errorf("source can't be a URL for COPY")
}
} else {
- // Treat the source, which is not a URL, as a
- // location relative to the
- // all-content-comes-from-below-this-directory
- // directory. Also raise an error if the src
- // escapes the context directory.
- srcSecure, err := securejoin.SecureJoin(contextDir, src)
- if err == nil && strings.HasPrefix(src, "../") {
- err = errors.New("escaping context directory error")
- }
- if err != nil {
- return errors.Wrapf(err, "forbidden path for %q, it is outside of the build context %q", src, contextDir)
- }
- if hadFinalPathSeparator {
- // If destination is a folder, we need to take extra care to
- // ensure that files are copied with correct names (since
- // resolving a symlink may result in a different name).
- _, srcName := filepath.Split(src)
- _, srcNameSecure := filepath.Split(srcSecure)
- if srcName != srcNameSecure {
- options := buildah.AddAndCopyOptions{
- Chown: copy.Chown,
- ContextDir: contextDir,
- Excludes: copyExcludes,
- IDMappingOptions: idMappingOptions,
- }
- // If we've a tar file, it will create a directory using the name of the tar
- // file if we don't blank it out.
- if strings.HasSuffix(srcName, ".tar") || strings.HasSuffix(srcName, ".gz") {
- srcName = ""
- }
- if err := s.builder.Add(filepath.Join(copy.Dest, srcName), copy.Download, options, srcSecure); err != nil {
- return err
- }
- continue
- }
- }
- sources = append(sources, srcSecure)
+ sources = append(sources, filepath.Join(contextDir, src))
}
}
options := buildah.AddAndCopyOptions{
- Chown: copy.Chown,
- ContextDir: contextDir,
- Excludes: copyExcludes,
- IDMappingOptions: idMappingOptions,
+ Chown: copy.Chown,
+ PreserveOwnership: preserveOwnership,
+ ContextDir: contextDir,
+ Excludes: copyExcludes,
+ IDMappingOptions: idMappingOptions,
+ StripSetuidBit: stripSetuid,
+ StripSetgidBit: stripSetgid,
}
if err := s.builder.Add(copy.Dest, copy.Download, options, sources...); err != nil {
- return err
+ return errors.Wrapf(err, "error adding sources %v", sources)
}
}
return nil
@@ -767,6 +591,7 @@ func (s *StageExecutor) getImageRootfs(ctx context.Context, image string) (mount
// Execute runs each of the steps in the stage's parsed tree, in turn.
func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string, ref reference.Canonical, err error) {
+ var resourceUsage rusage.Rusage
stage := s.stage
ib := stage.Builder
checkForLayers := s.executor.layers && s.executor.useCache
@@ -789,6 +614,30 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string,
}
s.executor.stagesLock.Unlock()
+ // Set things up so that we can log resource usage as we go.
+ logRusage := func() {
+ if rusage.Supported() {
+ usage, err := rusage.Get()
+ if err != nil {
+ fmt.Fprintf(s.executor.out, "error gathering resource usage information: %v\n", err)
+ return
+ }
+ if !s.executor.quiet && s.executor.logRusage {
+ fmt.Fprintf(s.executor.out, "%s\n", rusage.FormatDiff(usage.Subtract(resourceUsage)))
+ }
+ resourceUsage = usage
+ }
+ }
+
+ // Start counting resource usage before we potentially pull a base image.
+ if rusage.Supported() {
+ if resourceUsage, err = rusage.Get(); err != nil {
+ return "", nil, err
+ }
+ // Log the final incremental resource usage counter before we return.
+ defer logRusage()
+ }
+
// Create the (first) working container for this stage. Reinitializing
// the imagebuilder configuration may alter the list of steps we have,
// so take a snapshot of them *after* that.
@@ -824,7 +673,6 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string,
imgID = imgID[0:11]
}
if s.executor.iidfile == "" {
-
fmt.Fprintf(s.executor.out, "--> %s\n", imgID)
}
}
@@ -859,6 +707,7 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string,
}
for i, node := range children {
+ logRusage()
moreInstructions := i < len(children)-1
lastInstruction := !moreInstructions
// Resolve any arguments in this instruction.
@@ -871,11 +720,8 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string,
s.executor.log("%s", step.Original)
}
- // Check if there's a --from if the step command is COPY or
- // ADD. Set copyFrom to point to either the context directory
- // or the root of the container from the specified stage.
+		// Check for a --from flag when the step command is COPY.
// Also check the chown flag for validity.
- s.copyFrom = s.executor.contextDir
for _, flag := range step.Flags {
command := strings.ToUpper(step.Command)
// chown and from flags should have an '=' sign, '--chown=' or '--from='
@@ -886,31 +732,27 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string,
return "", nil, errors.Errorf("ADD only supports the --chown=<uid:gid> flag")
}
if strings.Contains(flag, "--from") && command == "COPY" {
- var mountPoint string
arr := strings.Split(flag, "=")
if len(arr) != 2 {
return "", nil, errors.Errorf("%s: invalid --from flag, should be --from=<name|stage>", command)
}
- // If the source's name corresponds to the
- // result of an earlier stage, wait for that
- // stage to finish being built.
-
// If arr[1] has an argument within it, resolve it to its
// value. Otherwise just return the value found.
- var arr1Err error
- arr[1], arr1Err = imagebuilder.ProcessWord(arr[1], s.stage.Builder.Arguments())
- if arr1Err != nil {
- return "", nil, errors.Wrapf(arr1Err, "unable to resolve argument %q", arr[1])
+ from, fromErr := imagebuilder.ProcessWord(arr[1], s.stage.Builder.Arguments())
+ if fromErr != nil {
+ return "", nil, errors.Wrapf(fromErr, "unable to resolve argument %q", arr[1])
}
- if isStage, err := s.executor.waitForStage(ctx, arr[1], s.stages[:s.index]); isStage && err != nil {
+ // If the source's name corresponds to the
+ // result of an earlier stage, wait for that
+ // stage to finish being built.
+ if isStage, err := s.executor.waitForStage(ctx, from, s.stages[:s.index]); isStage && err != nil {
return "", nil, err
}
- if otherStage, ok := s.executor.stages[arr[1]]; ok && otherStage.index < s.index {
- mountPoint = otherStage.mountPoint
- } else if mountPoint, err = s.getImageRootfs(ctx, arr[1]); err != nil {
- return "", nil, errors.Errorf("%s --from=%s: no stage or image found with that name", command, arr[1])
+ if otherStage, ok := s.executor.stages[from]; ok && otherStage.index < s.index {
+ break
+ } else if _, err = s.getImageRootfs(ctx, from); err != nil {
+ return "", nil, errors.Errorf("%s --from=%s: no stage or image found with that name", command, from)
}
- s.copyFrom = mountPoint
break
}
}
@@ -933,9 +775,14 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string,
return "", nil, errors.Wrapf(err, "error building at STEP \"%s\"", step.Message)
}
// In case we added content, retrieve its digest.
- addedContentDigest, err := s.digestSpecifiedContent(ctx, node, ib.Arguments(), ib.Config().Env)
- if err != nil {
- return "", nil, err
+ addedContentType, addedContentDigest := s.builder.ContentDigester.Digest()
+ addedContentSummary := addedContentType
+ if addedContentDigest != "" {
+ if addedContentSummary != "" {
+ addedContentSummary = addedContentSummary + ":"
+ }
+ addedContentSummary = addedContentSummary + addedContentDigest.Encoded()
+ logrus.Debugf("added content %s", addedContentSummary)
}
if moreInstructions {
// There are still more instructions to process
@@ -943,16 +790,17 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string,
// instruction in the history that we'll write
// for the image when we eventually commit it.
now := time.Now()
- s.builder.AddPrependedEmptyLayer(&now, s.getCreatedBy(node, addedContentDigest), "", "")
+ s.builder.AddPrependedEmptyLayer(&now, s.getCreatedBy(node, addedContentSummary), "", "")
continue
} else {
// This is the last instruction for this stage,
// so we should commit this container to create
- // an image, but only if it's the last one, or
- // if it's used as the basis for a later stage.
+ // an image, but only if it's the last stage,
+ // or if it's used as the basis for a later
+ // stage.
if lastStage || imageIsUsedLater {
logCommit(s.output, i)
- imgID, ref, err = s.commit(ctx, s.getCreatedBy(node, addedContentDigest), false, s.output)
+ imgID, ref, err = s.commit(ctx, s.getCreatedBy(node, addedContentSummary), false, s.output)
if err != nil {
return "", nil, errors.Wrapf(err, "error committing container for step %+v", *step)
}
@@ -966,10 +814,11 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string,
// We're in a multi-layered build.
var (
- commitName string
- cacheID string
- err error
- rebase bool
+ commitName string
+ cacheID string
+ err error
+ rebase bool
+ addedContentSummary string
)
// If we have to commit for this instruction, only assign the
@@ -978,46 +827,47 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string,
commitName = s.output
}
- // If we're using the cache, and we've managed to stick with
- // cached images so far, look for one that matches what we
- // expect to produce for this instruction.
- if checkForLayers && !(s.executor.squash && lastInstruction && lastStage) {
- addedContentDigest, err := s.digestSpecifiedContent(ctx, node, ib.Arguments(), ib.Config().Env)
- if err != nil {
- return "", nil, err
- }
- cacheID, err = s.intermediateImageExists(ctx, node, addedContentDigest)
+ // Check if there's already an image based on our parent that
+ // has the same change that we're about to make, so far as we
+ // can tell.
+ if checkForLayers {
+ cacheID, err = s.intermediateImageExists(ctx, node, addedContentSummary, s.stepRequiresLayer(step))
if err != nil {
return "", nil, errors.Wrap(err, "error checking if cached image exists from a previous build")
}
- if cacheID != "" {
- // Note the cache hit.
- logCacheHit(cacheID)
- } else {
- // We're not going to find any more cache hits.
- checkForLayers = false
- }
}
- if cacheID != "" {
- // A suitable cached image was found, so just reuse it.
- // If we need to name the resulting image because it's
- // the last step in this stage, add the name to the
- // image.
- imgID = cacheID
- if commitName != "" {
- logCommit(commitName, i)
- if imgID, ref, err = s.tagExistingImage(ctx, cacheID, commitName); err != nil {
- return "", nil, err
+		// If we didn't find a cache entry, or if we need to run the
+		// step to add content whose digest the cache lookup depends
+		// on, process the instruction directly, then check whether
+		// the result matches a cached image.
+ if cacheID == "" {
+ // Process the instruction directly.
+ if err = ib.Run(step, s, noRunsRemaining); err != nil {
+ logrus.Debugf("%v", errors.Wrapf(err, "error building at step %+v", *step))
+ return "", nil, errors.Wrapf(err, "error building at STEP \"%s\"", step.Message)
+ }
+
+ // In case we added content, retrieve its digest.
+ addedContentType, addedContentDigest := s.builder.ContentDigester.Digest()
+ addedContentSummary = addedContentType
+ if addedContentDigest != "" {
+ if addedContentSummary != "" {
+ addedContentSummary = addedContentSummary + ":"
+ }
+ addedContentSummary = addedContentSummary + addedContentDigest.Encoded()
+ logrus.Debugf("added content %s", addedContentSummary)
+ }
+
+ // Check if there's already an image based on our parent that
+ // has the same change that we just made.
+ if checkForLayers {
+ cacheID, err = s.intermediateImageExists(ctx, node, addedContentSummary, s.stepRequiresLayer(step))
+ if err != nil {
+ return "", nil, errors.Wrap(err, "error checking if cached image exists from a previous build")
}
- logImageID(imgID)
}
- // Update our working container to be based off of the
- // cached image, if we might need to use it as a basis
- // for the next instruction, or if we need the root
- // filesystem to match the image contents for the sake
- // of a later stage that wants to copy content from it.
- rebase = moreInstructions || rootfsIsUsedLater
+ } else {
// If the instruction would affect our configuration,
// process the configuration change so that, if we fall
// off the cache path, the filesystem changes from the
@@ -1031,34 +881,41 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string,
return "", nil, errors.Wrapf(err, "error building at STEP \"%s\"", step.Message)
}
}
- } else {
- // If we didn't find a cached image that we could just reuse,
- // process the instruction directly.
- err := ib.Run(step, s, noRunsRemaining)
- if err != nil {
- logrus.Debugf("%v", errors.Wrapf(err, "error building at step %+v", *step))
- return "", nil, errors.Wrapf(err, "error building at STEP \"%s\"", step.Message)
- }
- // In case we added content, retrieve its digest.
- addedContentDigest, err := s.digestSpecifiedContent(ctx, node, ib.Arguments(), ib.Config().Env)
- if err != nil {
- return "", nil, err
+ }
+
+ if cacheID != "" && !(s.executor.squash && lastInstruction) {
+ logCacheHit(cacheID)
+ // A suitable cached image was found, so we can just
+ // reuse it. If we need to add a name to the resulting
+ // image because it's the last step in this stage, add
+ // the name to the image.
+ imgID = cacheID
+ if commitName != "" {
+ logCommit(commitName, i)
+ if imgID, ref, err = s.tagExistingImage(ctx, cacheID, commitName); err != nil {
+ return "", nil, err
+ }
}
- // Create a new image, maybe with a new layer.
+ } else {
+ // We're not going to find any more cache hits, so we
+ // can stop looking for them.
+ checkForLayers = false
+ // Create a new image, maybe with a new layer, with the
+ // name for this stage if it's the last instruction.
logCommit(s.output, i)
- imgID, ref, err = s.commit(ctx, s.getCreatedBy(node, addedContentDigest), !s.stepRequiresLayer(step), commitName)
+ imgID, ref, err = s.commit(ctx, s.getCreatedBy(node, addedContentSummary), !s.stepRequiresLayer(step), commitName)
if err != nil {
return "", nil, errors.Wrapf(err, "error committing container for step %+v", *step)
}
- logImageID(imgID)
- // We only need to build a new container rootfs
- // using this image if we plan on making
- // further changes to it. Subsequent stages
- // that just want to use the rootfs as a source
- // for COPY or ADD will be content with what we
- // already have.
- rebase = moreInstructions
}
+ logImageID(imgID)
+
+		// Update our working container to be based on the new or
+		// cached image, if we might need it as a basis for the next
+ // instruction, or if we need the root filesystem to match the
+ // image contents for the sake of a later stage that wants to
+ // copy content from it.
+ rebase = moreInstructions || rootfsIsUsedLater
if rebase {
// Since we either committed the working container or
@@ -1105,29 +962,58 @@ func historyEntriesEqual(base, derived v1.History) bool {
return true
}
-// historyMatches returns true if a candidate history matches the history of our
-// base image (if we have one), plus the current instruction.
+// historyAndDiffIDsMatch returns true if a candidate history matches the
+// history of our base image (if we have one), plus the current instruction,
+// and if the images' lists of diff IDs match for the part of the history
+// that we're comparing.
// Used to verify whether a cache of the intermediate image exists and whether
// to run the build again.
-func (s *StageExecutor) historyMatches(baseHistory []v1.History, child *parser.Node, history []v1.History, addedContentDigest string) bool {
- if len(baseHistory) >= len(history) {
- return false
- }
- if len(history)-len(baseHistory) != 1 {
+func (s *StageExecutor) historyAndDiffIDsMatch(baseHistory []v1.History, baseDiffIDs []digest.Digest, child *parser.Node, history []v1.History, diffIDs []digest.Digest, addedContentSummary string, buildAddsLayer bool) bool {
+ // our history should be as long as the base's, plus one entry for what
+ // we're doing
+ if len(history) != len(baseHistory)+1 {
return false
}
+ // check that each entry in the base history corresponds to an entry in
+ // our history, and count how many of them add a layer diff
+ expectedDiffIDs := 0
for i := range baseHistory {
if !historyEntriesEqual(baseHistory[i], history[i]) {
return false
}
+ if !baseHistory[i].EmptyLayer {
+ expectedDiffIDs++
+ }
+ }
+ if len(baseDiffIDs) != expectedDiffIDs {
+ return false
+ }
+ if buildAddsLayer {
+ // we're adding a layer, so we should have exactly one more
+ // layer than the base image
+ if len(diffIDs) != expectedDiffIDs+1 {
+ return false
+ }
+ } else {
+ // we're not adding a layer, so we should have exactly the same
+ // layers as the base image
+ if len(diffIDs) != expectedDiffIDs {
+ return false
+ }
+ }
+ // compare the diffs for the layers that we should have in common
+ for i := range baseDiffIDs {
+ if diffIDs[i] != baseDiffIDs[i] {
+ return false
+ }
}
- return history[len(baseHistory)].CreatedBy == s.getCreatedBy(child, addedContentDigest)
+ return history[len(baseHistory)].CreatedBy == s.getCreatedBy(child, addedContentSummary)
}
 // getCreatedBy returns the command the image at node will be created by. If
 // the node is ADD or COPY, the passed-in addedContentSummary is assumed to
 // hold the digest information for the added content.
-func (s *StageExecutor) getCreatedBy(node *parser.Node, addedContentDigest string) string {
+func (s *StageExecutor) getCreatedBy(node *parser.Node, addedContentSummary string) string {
if node == nil {
return "/bin/sh"
}
@@ -1143,7 +1029,7 @@ func (s *StageExecutor) getCreatedBy(node *parser.Node, addedContentDigest strin
for destination.Next != nil {
destination = destination.Next
}
- return "/bin/sh -c #(nop) " + strings.ToUpper(node.Value) + " " + addedContentDigest + " in " + destination.Value + " "
+ return "/bin/sh -c #(nop) " + strings.ToUpper(node.Value) + " " + addedContentSummary + " in " + destination.Value + " "
default:
return "/bin/sh -c #(nop) " + node.Original
}
@@ -1212,40 +1098,54 @@ func (s *StageExecutor) tagExistingImage(ctx context.Context, cacheID, output st
// intermediateImageExists returns true if an intermediate image of currNode exists in the image store from a previous build.
// It verifies this by checking the parent of the top layer of the image and the history.
-func (s *StageExecutor) intermediateImageExists(ctx context.Context, currNode *parser.Node, addedContentDigest string) (string, error) {
+func (s *StageExecutor) intermediateImageExists(ctx context.Context, currNode *parser.Node, addedContentDigest string, buildAddsLayer bool) (string, error) {
// Get the list of images available in the image store
images, err := s.executor.store.Images()
if err != nil {
return "", errors.Wrap(err, "error getting image list from store")
}
var baseHistory []v1.History
+ var baseDiffIDs []digest.Digest
if s.builder.FromImageID != "" {
- baseHistory, err = s.executor.getImageHistory(ctx, s.builder.FromImageID)
+ baseHistory, baseDiffIDs, err = s.executor.getImageHistoryAndDiffIDs(ctx, s.builder.FromImageID)
if err != nil {
return "", errors.Wrapf(err, "error getting history of base image %q", s.builder.FromImageID)
}
}
for _, image := range images {
var imageTopLayer *storage.Layer
+ var imageParentLayerID string
if image.TopLayer != "" {
imageTopLayer, err = s.executor.store.Layer(image.TopLayer)
if err != nil {
return "", errors.Wrapf(err, "error getting top layer info")
}
+ // Figure out which layer from this image we should
+ // compare our container's base layer to.
+ imageParentLayerID = imageTopLayer.ID
+ // If we haven't added a layer here, then our base
+			// layer should be the same as the image's layer. If we
+			// did add a layer, then our base layer should be the
+			// same as the parent of the image's layer.
+ if buildAddsLayer {
+ imageParentLayerID = imageTopLayer.Parent
+ }
}
// If the parent of the top layer of an image is equal to the current build image's top layer,
// it means that this image is potentially a cached intermediate image from a previous
- // build. Next we double check that the history of this image is equivalent to the previous
+ // build.
+ if s.builder.TopLayer != imageParentLayerID {
+ continue
+ }
+ // Next we double check that the history of this image is equivalent to the previous
// lines in the Dockerfile up till the point we are at in the build.
- if imageTopLayer == nil || (s.builder.TopLayer != "" && (imageTopLayer.Parent == s.builder.TopLayer || imageTopLayer.ID == s.builder.TopLayer)) {
- history, err := s.executor.getImageHistory(ctx, image.ID)
- if err != nil {
- return "", errors.Wrapf(err, "error getting history of %q", image.ID)
- }
- // children + currNode is the point of the Dockerfile we are currently at.
- if s.historyMatches(baseHistory, currNode, history, addedContentDigest) {
- return image.ID, nil
- }
+ history, diffIDs, err := s.executor.getImageHistoryAndDiffIDs(ctx, image.ID)
+ if err != nil {
+ return "", errors.Wrapf(err, "error getting history of %q", image.ID)
+ }
+ // children + currNode is the point of the Dockerfile we are currently at.
+ if s.historyAndDiffIDsMatch(baseHistory, baseDiffIDs, currNode, history, diffIDs, addedContentDigest, buildAddsLayer) {
+ return image.ID, nil
}
}
return "", nil
@@ -1355,7 +1255,7 @@ func (s *StageExecutor) commit(ctx context.Context, createdBy string, emptyLayer
SignBy: s.executor.signBy,
MaxRetries: s.executor.maxPullPushRetries,
RetryDelay: s.executor.retryPullPushDelay,
- OmitTimestamp: s.executor.omitTimestamp,
+ HistoryTimestamp: s.executor.timestamp,
}
imgID, _, manifestDigest, err := s.builder.Commit(ctx, imageRef, options)
if err != nil {
@@ -1373,29 +1273,5 @@ func (s *StageExecutor) commit(ctx context.Context, createdBy string, emptyLayer
}
func (s *StageExecutor) EnsureContainerPath(path string) error {
- targetPath, err := securejoin.SecureJoin(s.mountPoint, path)
- if err != nil {
- return errors.Wrapf(err, "error ensuring container path %q", path)
- }
-
- _, err = os.Stat(targetPath)
- if err != nil && os.IsNotExist(err) {
- err = os.MkdirAll(targetPath, 0755)
- if err != nil {
- return errors.Wrapf(err, "error creating directory path %q", targetPath)
- }
- // get the uid and gid so that we can set the correct permissions on the
- // working directory
- uid, gid, _, err := chrootuser.GetUser(s.mountPoint, s.builder.User())
- if err != nil {
- return errors.Wrapf(err, "error getting uid and gid for user %q", s.builder.User())
- }
- if err = os.Chown(targetPath, int(uid), int(gid)); err != nil {
- return errors.Wrapf(err, "error setting ownership on %q", targetPath)
- }
- }
- if err != nil {
- return errors.Wrapf(err, "error ensuring container path %q", path)
- }
- return nil
+ return copier.Mkdir(s.mountPoint, path, copier.MkdirOptions{})
}
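
The cache logic above rests on a counting invariant: an image carries one diff ID per history entry whose EmptyLayer is false, plus one extra if the step under consideration adds a layer. A standalone sketch of just that bookkeeping, with a local struct standing in for v1.History:

package main

import "fmt"

// historyEntry is a pared-down stand-in for v1.History.
type historyEntry struct{ EmptyLayer bool }

// expectedDiffIDs counts how many layer diffs a history implies.
func expectedDiffIDs(history []historyEntry) int {
	n := 0
	for _, h := range history {
		if !h.EmptyLayer {
			n++
		}
	}
	return n
}

func main() {
	// Two real layers plus one config-only entry (e.g. ENV).
	base := []historyEntry{{false}, {true}, {false}}
	fmt.Println(expectedDiffIDs(base)) // 2; a cache candidate whose step adds
	// a layer must therefore carry exactly 3 diff IDs.
}
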
diff --git a/vendor/github.com/containers/buildah/pkg/cli/common.go b/vendor/github.com/containers/buildah/pkg/cli/common.go
index c1751bc8c..41d545bd8 100644
--- a/vendor/github.com/containers/buildah/pkg/cli/common.go
+++ b/vendor/github.com/containers/buildah/pkg/cli/common.go
@@ -65,7 +65,7 @@ type BudResults struct {
Logfile string
Loglevel int
NoCache bool
- OmitTimestamp bool
+ Timestamp int64
OS string
Platform string
Pull bool
@@ -82,6 +82,7 @@ type BudResults struct {
Target string
TLSVerify bool
Jobs int
+ LogRusage bool
}
// FromAndBugResults represents the results for common flags
@@ -164,7 +165,7 @@ func GetBudFlags(flags *BudResults) pflag.FlagSet {
fs.BoolVar(&flags.NoCache, "no-cache", false, "Do not use existing cached images for the container build. Build from the start with a new set of cached layers.")
fs.StringVar(&flags.Logfile, "logfile", "", "log to `file` instead of stdout/stderr")
fs.IntVar(&flags.Loglevel, "loglevel", 0, "adjust logging level (range from -2 to 3)")
- fs.BoolVar(&flags.OmitTimestamp, "omit-timestamp", false, "set created timestamp to epoch 0 to allow for deterministic builds")
+ fs.Int64Var(&flags.Timestamp, "timestamp", 0, "set created timestamp to the specified epoch seconds to allow for deterministic builds, defaults to current time")
fs.StringVar(&flags.OS, "os", runtime.GOOS, "set the OS to the provided value instead of the current operating system of the host")
fs.StringVar(&flags.Platform, "platform", parse.DefaultPlatform(), "set the OS/ARCH to the provided value instead of the current operating system and architecture of the host (for example `linux/arm`)")
fs.BoolVar(&flags.Pull, "pull", true, "pull the image from the registry if newer or not present in store, if false, only pull the image if not present")
@@ -181,6 +182,10 @@ func GetBudFlags(flags *BudResults) pflag.FlagSet {
fs.StringVar(&flags.Target, "target", "", "set the target build stage to build")
fs.BoolVar(&flags.TLSVerify, "tls-verify", true, "require HTTPS and verify certificates when accessing the registry")
fs.IntVar(&flags.Jobs, "jobs", 1, "how many stages to run in parallel")
+ fs.BoolVar(&flags.LogRusage, "log-rusage", false, "log resource usage at each build step")
+ if err := fs.MarkHidden("log-rusage"); err != nil {
+ panic(fmt.Sprintf("error marking the log-rusage flag as hidden: %v", err))
+ }
return fs
}
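
A quick illustration of the hidden-flag pattern used for --log-rusage: the flag still parses, it just doesn't show up in help output. This is plain spf13/pflag usage, not buildah-specific code:

package main

import (
	"fmt"

	"github.com/spf13/pflag"
)

func main() {
	fs := pflag.NewFlagSet("bud", pflag.ExitOnError)
	logRusage := fs.Bool("log-rusage", false, "log resource usage at each build step")
	// Hide the flag from usage text while keeping it functional.
	if err := fs.MarkHidden("log-rusage"); err != nil {
		panic(err)
	}
	if err := fs.Parse([]string{"--log-rusage"}); err != nil {
		panic(err)
	}
	fmt.Println(*logRusage) // true, even though --help omits the flag
}
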
diff --git a/vendor/github.com/containers/buildah/pkg/rusage/rusage.go b/vendor/github.com/containers/buildah/pkg/rusage/rusage.go
new file mode 100644
index 000000000..7b1226d24
--- /dev/null
+++ b/vendor/github.com/containers/buildah/pkg/rusage/rusage.go
@@ -0,0 +1,48 @@
+package rusage
+
+import (
+ "fmt"
+ "time"
+
+ units "github.com/docker/go-units"
+)
+
+// Rusage is a subset of a Unix-style resource usage counter for the current
+// process and its children. The counters are always 0 on platforms where the
+// system call is not available (i.e., systems where getrusage() doesn't
+// exist).
+type Rusage struct {
+ Date time.Time
+ Elapsed time.Duration
+ Utime, Stime time.Duration
+ Inblock, Outblock int64
+}
+
+// FormatDiff formats the result of rusage.Rusage.Subtract() for logging.
+func FormatDiff(diff Rusage) string {
+ return fmt.Sprintf("%s(system) %s(user) %s(elapsed) %s input %s output", diff.Stime.Round(time.Millisecond), diff.Utime.Round(time.Millisecond), diff.Elapsed.Round(time.Millisecond), units.HumanSize(float64(diff.Inblock*512)), units.HumanSize(float64(diff.Outblock*512)))
+}
+
+// Subtract subtracts baseline's counters from r's and returns the difference.
+// The Date field is zeroed for easier comparison with the zero value for the
+// Rusage type.
+func (r Rusage) Subtract(baseline Rusage) Rusage {
+ return Rusage{
+ Elapsed: r.Date.Sub(baseline.Date),
+ Utime: r.Utime - baseline.Utime,
+ Stime: r.Stime - baseline.Stime,
+ Inblock: r.Inblock - baseline.Inblock,
+ Outblock: r.Outblock - baseline.Outblock,
+ }
+}
+
+// Get returns the counters for the current process and its children, or an
+// error. The Elapsed field in the result is always zero; use Subtract to
+// compute the usage between two snapshots.
+func Get() (Rusage, error) {
+ counters, err := get()
+ if err != nil {
+ return Rusage{}, err
+ }
+ return counters, nil
+}
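
A sketch of how this package is consumed, mirroring the logRusage closure added to the stage executor: snapshot, do work, subtract, format. Note that the unix implementation below reads RUSAGE_CHILDREN, so only reaped child processes are counted:

package main

import (
	"fmt"
	"os/exec"

	"github.com/containers/buildah/pkg/rusage"
)

func main() {
	if !rusage.Supported() {
		fmt.Println("resource usage counters are not supported here")
		return
	}
	before, err := rusage.Get()
	if err != nil {
		panic(err)
	}
	_ = exec.Command("true").Run() // stand-in for a build step's child process
	after, err := rusage.Get()
	if err != nil {
		panic(err)
	}
	fmt.Println(rusage.FormatDiff(after.Subtract(before)))
}
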
diff --git a/vendor/github.com/containers/buildah/pkg/rusage/rusage_unix.go b/vendor/github.com/containers/buildah/pkg/rusage/rusage_unix.go
new file mode 100644
index 000000000..5bfed45c1
--- /dev/null
+++ b/vendor/github.com/containers/buildah/pkg/rusage/rusage_unix.go
@@ -0,0 +1,35 @@
+// +build !windows
+
+package rusage
+
+import (
+ "syscall"
+ "time"
+
+ "github.com/pkg/errors"
+)
+
+func mkduration(tv syscall.Timeval) time.Duration {
+ return time.Duration(tv.Sec)*time.Second + time.Duration(tv.Usec)*time.Microsecond
+}
+
+func get() (Rusage, error) {
+ var rusage syscall.Rusage
+ err := syscall.Getrusage(syscall.RUSAGE_CHILDREN, &rusage)
+ if err != nil {
+ return Rusage{}, errors.Wrapf(err, "error getting resource usage")
+ }
+ r := Rusage{
+ Date: time.Now(),
+ Utime: mkduration(rusage.Utime),
+ Stime: mkduration(rusage.Stime),
+ Inblock: int64(rusage.Inblock), // nolint: unconvert
+ Outblock: int64(rusage.Oublock), // nolint: unconvert
+ }
+ return r, nil
+}
+
+// Supported returns true if resource usage counters are supported on this OS.
+func Supported() bool {
+ return true
+}
diff --git a/vendor/github.com/containers/buildah/pkg/rusage/rusage_unsupported.go b/vendor/github.com/containers/buildah/pkg/rusage/rusage_unsupported.go
new file mode 100644
index 000000000..031c81402
--- /dev/null
+++ b/vendor/github.com/containers/buildah/pkg/rusage/rusage_unsupported.go
@@ -0,0 +1,18 @@
+// +build windows
+
+package rusage
+
+import (
+ "syscall"
+
+ "github.com/pkg/errors"
+)
+
+func get() (Rusage, error) {
+ return Rusage{}, errors.Wrapf(syscall.ENOTSUP, "error getting resource usage")
+}
+
+// Supported returns true if resource usage counters are supported on this OS.
+func Supported() bool {
+ return false
+}
diff --git a/vendor/github.com/containers/buildah/pull.go b/vendor/github.com/containers/buildah/pull.go
index f8d4bdeb6..be9a521d1 100644
--- a/vendor/github.com/containers/buildah/pull.go
+++ b/vendor/github.com/containers/buildah/pull.go
@@ -280,7 +280,7 @@ func pullImage(ctx context.Context, store storage.Store, srcRef types.ImageRefer
}()
logrus.Debugf("copying %q to %q", transports.ImageName(srcRef), destName)
- if _, err := retryCopyImage(ctx, policyContext, maybeCachedDestRef, srcRef, srcRef, "pull", getCopyOptions(store, options.ReportWriter, sc, nil, "", options.RemoveSignatures, "", nil, nil, options.OciDecryptConfig), options.MaxRetries, options.RetryDelay); err != nil {
+ if _, err := retryCopyImage(ctx, policyContext, maybeCachedDestRef, srcRef, srcRef, getCopyOptions(store, options.ReportWriter, sc, nil, "", options.RemoveSignatures, "", nil, nil, options.OciDecryptConfig), options.MaxRetries, options.RetryDelay); err != nil {
logrus.Debugf("error copying src image [%q] to dest image [%q] err: %v", transports.ImageName(srcRef), destName, err)
return nil, err
}
diff --git a/vendor/github.com/containers/buildah/run_linux.go b/vendor/github.com/containers/buildah/run_linux.go
index e21e3cd91..d83b3a5cc 100644
--- a/vendor/github.com/containers/buildah/run_linux.go
+++ b/vendor/github.com/containers/buildah/run_linux.go
@@ -316,7 +316,7 @@ func addCommonOptsToSpec(commonOpts *CommonBuildOptions, g *generate.Generator)
return nil
}
-func runSetupBuiltinVolumes(mountLabel, mountPoint, containerDir string, copyWithTar func(srcPath, dstPath string) error, builtinVolumes []string, rootUID, rootGID int) ([]specs.Mount, error) {
+func runSetupBuiltinVolumes(mountLabel, mountPoint, containerDir string, builtinVolumes []string, rootUID, rootGID int) ([]specs.Mount, error) {
var mounts []specs.Mount
hostOwner := idtools.IDPair{UID: rootUID, GID: rootGID}
// Add temporary copies of the contents of volume locations at the
@@ -359,7 +359,7 @@ func runSetupBuiltinVolumes(mountLabel, mountPoint, containerDir string, copyWit
if err = os.Chown(volumePath, int(stat.Sys().(*syscall.Stat_t).Uid), int(stat.Sys().(*syscall.Stat_t).Gid)); err != nil {
return nil, errors.Wrapf(err, "error chowning directory %q for volume %q", volumePath, volume)
}
- if err = copyWithTar(srcPath, volumePath); err != nil && !os.IsNotExist(errors.Cause(err)) {
+ if err = extractWithTar(mountPoint, srcPath, volumePath); err != nil && !os.IsNotExist(errors.Cause(err)) {
return nil, errors.Wrapf(err, "error populating directory %q for volume %q using contents of %q", volumePath, volume, srcPath)
}
}
@@ -483,8 +483,7 @@ func (b *Builder) setupMounts(mountPoint string, spec *specs.Spec, bundlePath st
// Add temporary copies of the contents of volume locations at the
// volume locations, unless we already have something there.
- copyWithTar := b.copyWithTar(nil, nil, nil, false)
- builtins, err := runSetupBuiltinVolumes(b.MountLabel, mountPoint, cdir, copyWithTar, builtinVolumes, int(rootUID), int(rootGID))
+ builtins, err := runSetupBuiltinVolumes(b.MountLabel, mountPoint, cdir, builtinVolumes, int(rootUID), int(rootGID))
if err != nil {
return err
}
@@ -864,12 +863,12 @@ func runUsingRuntime(isolation Isolation, options RunOptions, configureNetwork b
stat := exec.Command(runtime, args...)
stat.Dir = bundlePath
stat.Stderr = os.Stderr
- stateOutput, stateErr := stat.Output()
- if stateErr != nil {
- return 1, errors.Wrapf(stateErr, "error reading container state")
+ stateOutput, err := stat.Output()
+ if err != nil {
+ return 1, errors.Wrapf(err, "error reading container state (got output: %q)", string(stateOutput))
}
if err = json.Unmarshal(stateOutput, &state); err != nil {
- return 1, errors.Wrapf(stateErr, "error parsing container state %q", string(stateOutput))
+ return 1, errors.Wrapf(err, "error parsing container state %q", string(stateOutput))
}
switch state.Status {
case "running":
diff --git a/vendor/github.com/containers/buildah/seccomp.go b/vendor/github.com/containers/buildah/seccomp.go
index a435b5f71..fc7811098 100644
--- a/vendor/github.com/containers/buildah/seccomp.go
+++ b/vendor/github.com/containers/buildah/seccomp.go
@@ -5,9 +5,9 @@ package buildah
import (
"io/ioutil"
+ "github.com/containers/common/pkg/seccomp"
"github.com/opencontainers/runtime-spec/specs-go"
"github.com/pkg/errors"
- seccomp "github.com/seccomp/containers-golang"
)
func setupSeccomp(spec *specs.Spec, seccompProfilePath string) error {
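This hunk swaps the archived github.com/seccomp/containers-golang module for github.com/containers/common/pkg/seccomp; the old vendored copy is deleted wholesale further down. A hedged sketch of the profile-resolution logic that setupSeccomp implements, assuming GetDefaultProfile and LoadProfile keep the signatures of the replaced package and that spec.Linux is non-nil (applySeccomp is an illustrative name):

```go
package example

import (
	"io/ioutil"

	"github.com/containers/common/pkg/seccomp"
	specs "github.com/opencontainers/runtime-spec/specs-go"
)

// applySeccomp resolves a seccomp filter for spec: an empty path selects the
// built-in default profile, otherwise the JSON file at profilePath is loaded.
// (Sketch only; assumes the new package mirrors the old API.)
func applySeccomp(spec *specs.Spec, profilePath string) error {
	if profilePath == "" {
		filter, err := seccomp.GetDefaultProfile(spec)
		if err != nil {
			return err
		}
		spec.Linux.Seccomp = filter
		return nil
	}
	body, err := ioutil.ReadFile(profilePath)
	if err != nil {
		return err
	}
	filter, err := seccomp.LoadProfile(string(body), spec)
	if err != nil {
		return err
	}
	spec.Linux.Seccomp = filter
	return nil
}
```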
diff --git a/vendor/github.com/containers/buildah/selinux.go b/vendor/github.com/containers/buildah/selinux.go
index e64eb6112..fea863165 100644
--- a/vendor/github.com/containers/buildah/selinux.go
+++ b/vendor/github.com/containers/buildah/selinux.go
@@ -7,6 +7,10 @@ import (
selinux "github.com/opencontainers/selinux/go-selinux"
)
+func selinuxGetEnabled() bool {
+ return selinux.GetEnabled()
+}
+
func setupSelinux(g *generate.Generator, processLabel, mountLabel string) {
if processLabel != "" && selinux.GetEnabled() {
g.SetProcessSelinuxLabel(processLabel)
diff --git a/vendor/github.com/containers/buildah/selinux_unsupported.go b/vendor/github.com/containers/buildah/selinux_unsupported.go
index 0aa7c46e4..fb9213e29 100644
--- a/vendor/github.com/containers/buildah/selinux_unsupported.go
+++ b/vendor/github.com/containers/buildah/selinux_unsupported.go
@@ -6,5 +6,9 @@ import (
"github.com/opencontainers/runtime-tools/generate"
)
+func selinuxGetEnabled() bool {
+ return false
+}
+
func setupSelinux(g *generate.Generator, processLabel, mountLabel string) {
}
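Together with the matching hunk in selinux.go, this adds the usual Go build-tag stub pair: the Linux file wraps selinux.GetEnabled(), the unsupported file hard-codes false, and platform-neutral callers (ReserveSELinuxLabels in util.go below) compile everywhere. A minimal sketch of the pattern, with hypothetical file and package names:

```go
// feature_linux.go
// +build linux

package feature

import selinux "github.com/opencontainers/selinux/go-selinux"

// enabled reports whether SELinux is enforced on this host.
func enabled() bool { return selinux.GetEnabled() }
```

```go
// feature_unsupported.go
// +build !linux

package feature

// enabled always reports false where SELinux does not exist.
func enabled() bool { return false }
```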
diff --git a/vendor/github.com/containers/buildah/util.go b/vendor/github.com/containers/buildah/util.go
index f95c5ba57..4b5a00e44 100644
--- a/vendor/github.com/containers/buildah/util.go
+++ b/vendor/github.com/containers/buildah/util.go
@@ -1,26 +1,20 @@
package buildah
import (
- "archive/tar"
"io"
- "io/ioutil"
"os"
"path/filepath"
+ "sync"
- "github.com/containers/buildah/util"
+ "github.com/containers/buildah/copier"
"github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/pkg/sysregistriesv2"
"github.com/containers/image/v5/types"
"github.com/containers/storage"
- "github.com/containers/storage/pkg/archive"
- "github.com/containers/storage/pkg/chrootarchive"
"github.com/containers/storage/pkg/idtools"
- "github.com/containers/storage/pkg/pools"
"github.com/containers/storage/pkg/reexec"
- "github.com/containers/storage/pkg/system"
v1 "github.com/opencontainers/image-spec/specs-go/v1"
rspec "github.com/opencontainers/runtime-spec/specs-go"
- selinux "github.com/opencontainers/selinux/go-selinux"
"github.com/opencontainers/selinux/go-selinux/label"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
@@ -109,245 +103,6 @@ func convertRuntimeIDMaps(UIDMap, GIDMap []rspec.LinuxIDMapping) ([]idtools.IDMa
return uidmap, gidmap
}
-// copyFileWithTar returns a function which copies a single file from outside
-// of any container, or another container, into our working container, mapping
-// read permissions using the passed-in ID maps, writing using the container's
-// ID mappings, possibly overridden using the passed-in chownOpts
-func (b *Builder) copyFileWithTar(tarIDMappingOptions *IDMappingOptions, chownOpts *idtools.IDPair, hasher io.Writer, dryRun bool) func(src, dest string) error {
- if tarIDMappingOptions == nil {
- tarIDMappingOptions = &IDMappingOptions{
- HostUIDMapping: true,
- HostGIDMapping: true,
- }
- }
-
- var hardlinkChecker util.HardlinkChecker
- return func(src, dest string) error {
- var f *os.File
-
- logrus.Debugf("copyFileWithTar(%s, %s)", src, dest)
- fi, err := os.Lstat(src)
- if err != nil {
- return errors.Wrapf(err, "error reading attributes of %q", src)
- }
-
- sysfi, err := system.Lstat(src)
- if err != nil {
- return errors.Wrapf(err, "error reading attributes of %q", src)
- }
-
- hostUID := sysfi.UID()
- hostGID := sysfi.GID()
- containerUID, containerGID, err := util.GetContainerIDs(tarIDMappingOptions.UIDMap, tarIDMappingOptions.GIDMap, hostUID, hostGID)
- if err != nil {
- return errors.Wrapf(err, "error mapping owner IDs of %q: %d/%d", src, hostUID, hostGID)
- }
-
- hdr, err := tar.FileInfoHeader(fi, filepath.Base(src))
- if err != nil {
- return errors.Wrapf(err, "error generating tar header for: %q", src)
- }
- chrootedDest, err := filepath.Rel(b.MountPoint, dest)
- if err != nil {
- return errors.Wrapf(err, "error generating relative-to-chroot target name for %q", dest)
- }
- hdr.Name = chrootedDest
- hdr.Uid = int(containerUID)
- hdr.Gid = int(containerGID)
-
- if fi.Mode().IsRegular() && hdr.Typeflag == tar.TypeReg {
- if linkname := hardlinkChecker.Check(fi); linkname != "" {
- hdr.Typeflag = tar.TypeLink
- hdr.Linkname = linkname
- } else {
- hardlinkChecker.Add(fi, chrootedDest)
- f, err = os.Open(src)
- if err != nil {
- return errors.Wrapf(err, "error opening %q to copy its contents", src)
- }
- }
- }
-
- if fi.Mode()&os.ModeSymlink == os.ModeSymlink && hdr.Typeflag == tar.TypeSymlink {
- hdr.Typeflag = tar.TypeSymlink
- linkName, err := os.Readlink(src)
- if err != nil {
- return errors.Wrapf(err, "error reading destination from symlink %q", src)
- }
- hdr.Linkname = linkName
- }
-
- pipeReader, pipeWriter := io.Pipe()
- writer := tar.NewWriter(pipeWriter)
- var copyErr error
- go func(srcFile *os.File) {
- err := writer.WriteHeader(hdr)
- if err != nil {
- logrus.Debugf("error writing header for %s: %v", srcFile.Name(), err)
- copyErr = err
- }
- if srcFile != nil {
- n, err := pools.Copy(writer, srcFile)
- if n != hdr.Size {
- logrus.Debugf("expected to write %d bytes for %s, wrote %d instead", hdr.Size, srcFile.Name(), n)
- }
- if err != nil {
- logrus.Debugf("error copying contents of %s: %v", fi.Name(), err)
- copyErr = err
- }
- if err = srcFile.Close(); err != nil {
- logrus.Debugf("error closing %s: %v", fi.Name(), err)
- }
- }
- if err = writer.Close(); err != nil {
- logrus.Debugf("error closing write pipe for %s: %v", hdr.Name, err)
- }
- pipeWriter.Close()
- pipeWriter = nil
- }(f)
-
- untar := b.untar(chownOpts, hasher, dryRun)
- err = untar(pipeReader, b.MountPoint)
- if err == nil {
- err = copyErr
- }
- if pipeWriter != nil {
- pipeWriter.Close()
- }
- return err
- }
-}
-
-// copyWithTar returns a function which copies a directory tree from outside of
-// our container or from another container, into our working container, mapping
-// permissions at read-time using the container's ID maps, with ownership at
-// write-time possibly overridden using the passed-in chownOpts
-func (b *Builder) copyWithTar(tarIDMappingOptions *IDMappingOptions, chownOpts *idtools.IDPair, hasher io.Writer, dryRun bool) func(src, dest string) error {
- tar := b.tarPath(tarIDMappingOptions)
- return func(src, dest string) error {
- thisHasher := hasher
- if thisHasher != nil && b.ContentDigester.Hash() != nil {
- thisHasher = io.MultiWriter(thisHasher, b.ContentDigester.Hash())
- }
- if thisHasher == nil {
- thisHasher = b.ContentDigester.Hash()
- }
- untar := b.untar(chownOpts, thisHasher, dryRun)
- rc, err := tar(src)
- if err != nil {
- return errors.Wrapf(err, "error archiving %q for copy", src)
- }
- return untar(rc, dest)
- }
-}
-
-// untarPath returns a function which extracts an archive in a specified
-// location into our working container, mapping permissions using the
-// container's ID maps, possibly overridden using the passed-in chownOpts
-func (b *Builder) untarPath(chownOpts *idtools.IDPair, hasher io.Writer, dryRun bool) func(src, dest string) error {
- convertedUIDMap, convertedGIDMap := convertRuntimeIDMaps(b.IDMappingOptions.UIDMap, b.IDMappingOptions.GIDMap)
- if dryRun {
- return func(src, dest string) error {
- thisHasher := hasher
- if thisHasher != nil && b.ContentDigester.Hash() != nil {
- thisHasher = io.MultiWriter(thisHasher, b.ContentDigester.Hash())
- }
- if thisHasher == nil {
- thisHasher = b.ContentDigester.Hash()
- }
- f, err := os.Open(src)
- if err != nil {
- return errors.Wrapf(err, "error opening %q", src)
- }
- defer f.Close()
- _, err = io.Copy(thisHasher, f)
- return err
- }
- }
- return func(src, dest string) error {
- thisHasher := hasher
- if thisHasher != nil && b.ContentDigester.Hash() != nil {
- thisHasher = io.MultiWriter(thisHasher, b.ContentDigester.Hash())
- }
- if thisHasher == nil {
- thisHasher = b.ContentDigester.Hash()
- }
- untarPathAndChown := chrootarchive.UntarPathAndChown(chownOpts, thisHasher, convertedUIDMap, convertedGIDMap)
- return untarPathAndChown(src, dest)
- }
-}
-
-// tarPath returns a function which creates an archive of a specified location,
-// which is often somewhere in the container's filesystem, mapping permissions
-// using the container's ID maps, or the passed-in maps if specified
-func (b *Builder) tarPath(idMappingOptions *IDMappingOptions) func(path string) (io.ReadCloser, error) {
- var uidmap, gidmap []idtools.IDMap
- if idMappingOptions == nil {
- idMappingOptions = &IDMappingOptions{
- HostUIDMapping: true,
- HostGIDMapping: true,
- }
- }
- convertedUIDMap, convertedGIDMap := convertRuntimeIDMaps(idMappingOptions.UIDMap, idMappingOptions.GIDMap)
- tarMappings := idtools.NewIDMappingsFromMaps(convertedUIDMap, convertedGIDMap)
- uidmap = tarMappings.UIDs()
- gidmap = tarMappings.GIDs()
- options := &archive.TarOptions{
- Compression: archive.Uncompressed,
- UIDMaps: uidmap,
- GIDMaps: gidmap,
- }
- return func(path string) (io.ReadCloser, error) {
- return archive.TarWithOptions(path, options)
- }
-}
-
-// untar returns a function which extracts an archive stream to a specified
-// location in the container's filesystem, mapping permissions using the
-// container's ID maps, possibly overridden using the passed-in chownOpts
-func (b *Builder) untar(chownOpts *idtools.IDPair, hasher io.Writer, dryRun bool) func(tarArchive io.ReadCloser, dest string) error {
- convertedUIDMap, convertedGIDMap := convertRuntimeIDMaps(b.IDMappingOptions.UIDMap, b.IDMappingOptions.GIDMap)
- untarMappings := idtools.NewIDMappingsFromMaps(convertedUIDMap, convertedGIDMap)
- options := &archive.TarOptions{
- UIDMaps: untarMappings.UIDs(),
- GIDMaps: untarMappings.GIDs(),
- ChownOpts: chownOpts,
- }
- untar := chrootarchive.Untar
- if dryRun {
- untar = func(tarArchive io.Reader, dest string, options *archive.TarOptions) error {
- if _, err := io.Copy(ioutil.Discard, tarArchive); err != nil {
- return errors.Wrapf(err, "error digesting tar stream")
- }
- return nil
- }
- }
- originalUntar := untar
- untarWithHasher := func(tarArchive io.Reader, dest string, options *archive.TarOptions, untarHasher io.Writer) error {
- reader := tarArchive
- if untarHasher != nil {
- reader = io.TeeReader(tarArchive, untarHasher)
- }
- return originalUntar(reader, dest, options)
- }
- return func(tarArchive io.ReadCloser, dest string) error {
- thisHasher := hasher
- if thisHasher != nil && b.ContentDigester.Hash() != nil {
- thisHasher = io.MultiWriter(thisHasher, b.ContentDigester.Hash())
- }
- if thisHasher == nil {
- thisHasher = b.ContentDigester.Hash()
- }
- err := untarWithHasher(tarArchive, dest, options, thisHasher)
- if err2 := tarArchive.Close(); err2 != nil {
- if err == nil {
- err = err2
- }
- }
- return err
- }
-}
-
// isRegistryBlocked checks if the named registry is marked as blocked
func isRegistryBlocked(registry string, sc *types.SystemContext) (bool, error) {
reginfo, err := sysregistriesv2.FindRegistry(sc, registry)
@@ -389,10 +144,10 @@ func isReferenceBlocked(ref types.ImageReference, sc *types.SystemContext) (bool
return false, nil
}
-// ReserveSELinuxLabels reads containers storage and reserves SELinux containers
-// fall all existing buildah containers
+// ReserveSELinuxLabels reads containers storage and reserves SELinux contexts
+// which are already being used by buildah containers.
func ReserveSELinuxLabels(store storage.Store, id string) error {
- if selinux.GetEnabled() {
+ if selinuxGetEnabled() {
containers, err := store.Containers()
if err != nil {
return errors.Wrapf(err, "error getting list of containers")
@@ -438,3 +193,35 @@ func IsContainer(id string, store storage.Store) (bool, error) {
}
return true, nil
}
+
+// extractWithTar copies content from the directory "src" to the directory
+// "dest", ensuring that content from outside of "root" (which must be "src"
+// itself or a parent of "src") isn't read.
+func extractWithTar(root, src, dest string) error {
+ var getErr, putErr error
+ var wg sync.WaitGroup
+
+ pipeReader, pipeWriter := io.Pipe()
+
+ wg.Add(1)
+ go func() {
+ getErr = copier.Get(root, src, copier.GetOptions{}, []string{"."}, pipeWriter)
+ pipeWriter.Close()
+ wg.Done()
+ }()
+ wg.Add(1)
+ go func() {
+ putErr = copier.Put(dest, dest, copier.PutOptions{}, pipeReader)
+ pipeReader.Close()
+ wg.Done()
+ }()
+ wg.Wait()
+
+ if getErr != nil {
+ return errors.Wrapf(getErr, "error reading %q", src)
+ }
+ if putErr != nil {
+ return errors.Wrapf(putErr, "error copying contents of %q to %q", src, dest)
+ }
+ return nil
+}
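extractWithTar above replaces the per-builder copyWithTar closures that this diff deletes: copier.Get archives "src" without letting content escape "root", copier.Put unpacks the stream under "dest", and an io.Pipe plus a WaitGroup tie the two goroutines together. A generic sketch of that producer/consumer shape (pipeCopy is an illustrative helper, not part of the diff):

```go
package pipedemo

import (
	"io"
	"sync"
)

// pipeCopy runs a producer and a consumer concurrently, connected by an
// io.Pipe, and reports the first error from either side.  This mirrors the
// control flow of extractWithTar: the writer must be closed so the reader
// sees EOF, and the WaitGroup ensures both goroutines finish before the
// error values are inspected.
func pipeCopy(produce func(io.Writer) error, consume func(io.Reader) error) error {
	var produceErr, consumeErr error
	var wg sync.WaitGroup

	pr, pw := io.Pipe()

	wg.Add(1)
	go func() {
		defer wg.Done()
		produceErr = produce(pw)
		pw.Close() // unblocks the reader with EOF
	}()
	wg.Add(1)
	go func() {
		defer wg.Done()
		consumeErr = consume(pr)
		pr.Close() // unblocks the writer if the consumer bailed early
	}()
	wg.Wait()

	if produceErr != nil {
		return produceErr
	}
	return consumeErr
}
```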
diff --git a/vendor/github.com/seccomp/containers-golang/.gitignore b/vendor/github.com/seccomp/containers-golang/.gitignore
deleted file mode 100644
index e433eef88..000000000
--- a/vendor/github.com/seccomp/containers-golang/.gitignore
+++ /dev/null
@@ -1,2 +0,0 @@
-*.orig
-generate
diff --git a/vendor/github.com/seccomp/containers-golang/LICENSE b/vendor/github.com/seccomp/containers-golang/LICENSE
deleted file mode 100644
index bd465fcf0..000000000
--- a/vendor/github.com/seccomp/containers-golang/LICENSE
+++ /dev/null
@@ -1,190 +0,0 @@
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- Copyright 2018-2019 github.com/seccomp authors.
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
diff --git a/vendor/github.com/seccomp/containers-golang/Makefile b/vendor/github.com/seccomp/containers-golang/Makefile
deleted file mode 100644
index 2d91917f9..000000000
--- a/vendor/github.com/seccomp/containers-golang/Makefile
+++ /dev/null
@@ -1,32 +0,0 @@
-export GO111MODULE=off
-
-TAGS ?= seccomp
-BUILDFLAGS := -tags "$(AUTOTAGS) $(TAGS)"
-GO := go
-PACKAGE := github.com/seccomp/containers-golang
-
-sources := $(wildcard *.go)
-
-.PHONY: seccomp.json
-seccomp.json: $(sources)
- $(GO) build -compiler gc $(BUILDFLAGS) ./cmd/generate.go
- $(GO) build -compiler gc ./cmd/generate.go
- $(GO) run ${BUILDFLAGS} cmd/generate.go
-
-all: seccomp.json
-
-.PHONY: test-unit
-test-unit:
- $(GO) test -v $(BUILDFLAGS) $(shell $(GO) list ./... | grep -v ^$(PACKAGE)/vendor)
- $(GO) test -v $(shell $(GO) list ./... | grep -v ^$(PACKAGE)/vendor)
-
-.PHONY: vendor
-vendor:
- export GO111MODULE=on \
- $(GO) mod tidy && \
- $(GO) mod vendor && \
- $(GO) mod verify
-
-.PHONY: clean
-clean:
- rm -f generate
diff --git a/vendor/github.com/seccomp/containers-golang/README.md b/vendor/github.com/seccomp/containers-golang/README.md
deleted file mode 100644
index a44238432..000000000
--- a/vendor/github.com/seccomp/containers-golang/README.md
+++ /dev/null
@@ -1,29 +0,0 @@
-# containers-golang
-
-[![CircleCI](https://circleci.com/gh/seccomp/containers-golang.svg?style=shield)](https://circleci.com/gh/seccomp/containers-golang)
-
-`containers-golang` is a set of Go libraries used by container runtimes to generate and load seccomp mappings into the kernel.
-
-seccomp (short for secure computing mode) is a BPF-based syscall filter language and presents a more conventional function-call based filtering interface that should be familiar to, and easily adopted by, application developers.
-
-## Building
- make - Generates the seccomp.json file, which contains the whitelisted syscalls that can be used by container runtime engines like [CRI-O][cri-o], [Buildah][buildah], [Podman][podman] and [Docker][docker], and container runtimes like OCI [Runc][runc] to control the syscalls available to containers.
-
-### Supported build tags
-
- `seccomp`
-
-## Contributing
-
-When developing this library, please use `make` (or `make … BUILDTAGS=…`) to take advantage of the tests and validation.
-
-## Contact
-
-- IRC: #[containers](irc://irc.freenode.net:6667/#containers) on freenode.net
-
-[cri-o]: https://github.com/kubernetes-incubator/cri-o/pulls
-[buildah]: https://github.com/projectatomic/buildah
-[podman]: https://github.com/projectatomic/podman
-[docker]: https://github.com/docker/docker
-[runc]: https://github.com/opencontainers/runc
-
diff --git a/vendor/github.com/seccomp/containers-golang/conversion.go b/vendor/github.com/seccomp/containers-golang/conversion.go
deleted file mode 100644
index 05564487b..000000000
--- a/vendor/github.com/seccomp/containers-golang/conversion.go
+++ /dev/null
@@ -1,32 +0,0 @@
-package seccomp // import "github.com/seccomp/containers-golang"
-
-import "fmt"
-
-var goArchToSeccompArchMap = map[string]Arch{
- "386": ArchX86,
- "amd64": ArchX86_64,
- "amd64p32": ArchX32,
- "arm": ArchARM,
- "arm64": ArchAARCH64,
- "mips": ArchMIPS,
- "mips64": ArchMIPS64,
- "mips64le": ArchMIPSEL64,
- "mips64p32": ArchMIPS64N32,
- "mips64p32le": ArchMIPSEL64N32,
- "mipsle": ArchMIPSEL,
- "ppc": ArchPPC,
- "ppc64": ArchPPC64,
- "ppc64le": ArchPPC64LE,
- "s390": ArchS390,
- "s390x": ArchS390X,
-}
-
-// GoArchToSeccompArch converts a runtime.GOARCH to a seccomp `Arch`. The
-// function returns an error if the architecture conversion is not supported.
-func GoArchToSeccompArch(goArch string) (Arch, error) {
- arch, ok := goArchToSeccompArchMap[goArch]
- if !ok {
- return "", fmt.Errorf("unsupported go arch provided: %s", goArch)
- }
- return arch, nil
-}
diff --git a/vendor/github.com/seccomp/containers-golang/go.mod b/vendor/github.com/seccomp/containers-golang/go.mod
deleted file mode 100644
index 8e21f0f99..000000000
--- a/vendor/github.com/seccomp/containers-golang/go.mod
+++ /dev/null
@@ -1,16 +0,0 @@
-module github.com/seccomp/containers-golang
-
-go 1.14
-
-require (
- github.com/blang/semver v3.5.1+incompatible // indirect
- github.com/hashicorp/go-multierror v1.1.0 // indirect
- github.com/opencontainers/runtime-spec v1.0.3-0.20200710190001-3e4195d92445
- github.com/opencontainers/runtime-tools v0.9.0
- github.com/opencontainers/selinux v1.6.0 // indirect
- github.com/seccomp/libseccomp-golang v0.9.1
- github.com/sirupsen/logrus v1.6.0 // indirect
- github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2 // indirect
- github.com/xeipuuv/gojsonschema v1.2.0 // indirect
- golang.org/x/sys v0.0.0-20200720211630-cb9d2d5c5666
-)
diff --git a/vendor/github.com/seccomp/containers-golang/go.sum b/vendor/github.com/seccomp/containers-golang/go.sum
deleted file mode 100644
index d7fc538c0..000000000
--- a/vendor/github.com/seccomp/containers-golang/go.sum
+++ /dev/null
@@ -1,66 +0,0 @@
-github.com/blang/semver v1.1.0 h1:ol1rO7QQB5uy7umSNV7VAmLugfLRD+17sYJujRNYPhg=
-github.com/blang/semver v3.5.1+incompatible h1:cQNTCjp13qL8KC3Nbxr/y2Bqb63oX6wdnnjpJbkM4JQ=
-github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
-github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA=
-github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
-github.com/hashicorp/go-multierror v1.0.0 h1:iVjPR7a6H0tWELX5NxNe7bYopibicUzc7uPribsnS6o=
-github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk=
-github.com/hashicorp/go-multierror v1.1.0 h1:B9UzwGQJehnUY1yNrnwREHc3fGbC2xefo8g4TbElacI=
-github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA=
-github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk=
-github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
-github.com/konsorten/go-windows-terminal-sequences v1.0.3 h1:CE8S1cTafDpPvMhIxNJKvHsGVBgn1xWYf1NbHQhywc8=
-github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
-github.com/opencontainers/runtime-spec v0.1.2-0.20190618234442-a950415649c7 h1:Dliu5QO+4JYWu/yMshaMU7G3JN2POGpwjJN7gjy10Go=
-github.com/opencontainers/runtime-spec v0.1.2-0.20190618234442-a950415649c7/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
-github.com/opencontainers/runtime-spec v1.0.1 h1:wY4pOY8fBdSIvs9+IDHC55thBuEulhzfSgKeC1yFvzQ=
-github.com/opencontainers/runtime-spec v1.0.1/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
-github.com/opencontainers/runtime-spec v1.0.2-0.20191007145322-19e92ca81777 h1:7CkKaORyxoXsM8z56r+M0wf3uCpVGVqx4CWq7oJ/4DY=
-github.com/opencontainers/runtime-spec v1.0.2-0.20191007145322-19e92ca81777/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
-github.com/opencontainers/runtime-spec v1.0.3-0.20200520003142-237cc4f519e2 h1:9mv9SC7GWmRWE0J/+oD8w3GsN2KYGKtg6uwLN7hfP5E=
-github.com/opencontainers/runtime-spec v1.0.3-0.20200520003142-237cc4f519e2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
-github.com/opencontainers/runtime-spec v1.0.3-0.20200710190001-3e4195d92445 h1:y8cfsJRmn8g3VkM4IDpusKSgMUZEXhudm/BuYANLozE=
-github.com/opencontainers/runtime-spec v1.0.3-0.20200710190001-3e4195d92445/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
-github.com/opencontainers/runtime-tools v0.9.0 h1:FYgwVsKRI/H9hU32MJ/4MLOzXWodKK5zsQavY8NPMkU=
-github.com/opencontainers/runtime-tools v0.9.0/go.mod h1:r3f7wjNzSs2extwzU3Y+6pKfobzPh+kKFJ3ofN+3nfs=
-github.com/opencontainers/selinux v1.2.2 h1:Kx9J6eDG5/24A6DtUquGSpJQ+m2MUTahn4FtGEe8bFg=
-github.com/opencontainers/selinux v1.2.2/go.mod h1:+BLncwf63G4dgOzykXAxcmnFlUaOlkDdmw/CqsW6pjs=
-github.com/opencontainers/selinux v1.3.0 h1:xsI95WzPZu5exzA6JzkLSfdr/DilzOhCJOqGe5TgR0g=
-github.com/opencontainers/selinux v1.3.0/go.mod h1:+BLncwf63G4dgOzykXAxcmnFlUaOlkDdmw/CqsW6pjs=
-github.com/opencontainers/selinux v1.6.0 h1:+bIAS/Za3q5FTwWym4fTB0vObnfCf3G/NC7K6Jx62mY=
-github.com/opencontainers/selinux v1.6.0/go.mod h1:VVGKuOLlE7v4PJyT6h7mNWvq1rzqiriPsEqVhc+svHE=
-github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
-github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
-github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
-github.com/seccomp/libseccomp-golang v0.9.1 h1:NJjM5DNFOs0s3kYE1WUOr6G8V97sdt46rlXTMfXGWBo=
-github.com/seccomp/libseccomp-golang v0.9.1/go.mod h1:GbW5+tmTXfcxTToHLXlScSlAvWlF4P2Ca7zGrPiEpWo=
-github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4=
-github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
-github.com/sirupsen/logrus v1.6.0 h1:UBcNElsrwanuuMsnGSlYmtmgbb23qDR5dG+6X6Oo89I=
-github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
-github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
-github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
-github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
-github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
-github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2 h1:b6uOv7YOFK0TYG7HtkIgExQo+2RdLuwRft63jn2HWj8=
-github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
-github.com/willf/bitset v1.1.11-0.20200630133818-d5bec3311243 h1:R43TdZy32XXSXjJn7M/HhALJ9imq6ztLnChfYJpVDnM=
-github.com/willf/bitset v1.1.11-0.20200630133818-d5bec3311243/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4=
-github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f h1:J9EGpcZtP0E/raorCMxlFGSTBrsSlaDGf3jU/qvAE2c=
-github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
-github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 h1:EzJWgHovont7NscjpAxXsDA8S8BMYve8Y5+7cuRE7R0=
-github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ=
-github.com/xeipuuv/gojsonschema v1.1.0 h1:ngVtJC9TY/lg0AA/1k48FYhBrhRoFlEmWzsehpNAaZg=
-github.com/xeipuuv/gojsonschema v1.1.0/go.mod h1:5yf86TLmAcydyeJq5YvxkGPE2fm/u4myDekKRoLuqhs=
-github.com/xeipuuv/gojsonschema v1.2.0 h1:LhYJRs+L4fBtjZUfuSZIKGeVu0QRy8e5Xi7D17UxZ74=
-github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y=
-golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0 h1:HyfiK1WMnHj5FXFXatD+Qs1A/xC2Run6RzeW1SyHxpc=
-golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190921190940-14da1ac737cc h1:EinpED/Eb9JUgDi6pkoFjw+tz69c3lHUZr2+Va84S0w=
-golang.org/x/sys v0.0.0-20190921190940-14da1ac737cc/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191115151921-52ab43148777/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200720211630-cb9d2d5c5666 h1:gVCS+QOncANNPlmlO1AhlU3oxs4V9z+gTtPwIk3p2N8=
-golang.org/x/sys v0.0.0-20200720211630-cb9d2d5c5666/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
diff --git a/vendor/github.com/seccomp/containers-golang/seccomp.json b/vendor/github.com/seccomp/containers-golang/seccomp.json
deleted file mode 100644
index 06b39024a..000000000
--- a/vendor/github.com/seccomp/containers-golang/seccomp.json
+++ /dev/null
@@ -1,878 +0,0 @@
-{
- "defaultAction": "SCMP_ACT_ERRNO",
- "archMap": [
- {
- "architecture": "SCMP_ARCH_X86_64",
- "subArchitectures": [
- "SCMP_ARCH_X86",
- "SCMP_ARCH_X32"
- ]
- },
- {
- "architecture": "SCMP_ARCH_AARCH64",
- "subArchitectures": [
- "SCMP_ARCH_ARM"
- ]
- },
- {
- "architecture": "SCMP_ARCH_MIPS64",
- "subArchitectures": [
- "SCMP_ARCH_MIPS",
- "SCMP_ARCH_MIPS64N32"
- ]
- },
- {
- "architecture": "SCMP_ARCH_MIPS64N32",
- "subArchitectures": [
- "SCMP_ARCH_MIPS",
- "SCMP_ARCH_MIPS64"
- ]
- },
- {
- "architecture": "SCMP_ARCH_MIPSEL64",
- "subArchitectures": [
- "SCMP_ARCH_MIPSEL",
- "SCMP_ARCH_MIPSEL64N32"
- ]
- },
- {
- "architecture": "SCMP_ARCH_MIPSEL64N32",
- "subArchitectures": [
- "SCMP_ARCH_MIPSEL",
- "SCMP_ARCH_MIPSEL64"
- ]
- },
- {
- "architecture": "SCMP_ARCH_S390X",
- "subArchitectures": [
- "SCMP_ARCH_S390"
- ]
- }
- ],
- "syscalls": [
- {
- "names": [
- "_llseek",
- "_newselect",
- "accept",
- "accept4",
- "access",
- "adjtimex",
- "alarm",
- "bind",
- "brk",
- "capget",
- "capset",
- "chdir",
- "chmod",
- "chown",
- "chown32",
- "clock_getres",
- "clock_gettime",
- "clock_nanosleep",
- "close",
- "connect",
- "copy_file_range",
- "creat",
- "dup",
- "dup2",
- "dup3",
- "epoll_create",
- "epoll_create1",
- "epoll_ctl",
- "epoll_ctl_old",
- "epoll_pwait",
- "epoll_wait",
- "epoll_wait_old",
- "eventfd",
- "eventfd2",
- "execve",
- "execveat",
- "exit",
- "exit_group",
- "faccessat",
- "fadvise64",
- "fadvise64_64",
- "fallocate",
- "fanotify_mark",
- "fchdir",
- "fchmod",
- "fchmodat",
- "fchown",
- "fchown32",
- "fchownat",
- "fcntl",
- "fcntl64",
- "fdatasync",
- "fgetxattr",
- "flistxattr",
- "flock",
- "fork",
- "fremovexattr",
- "fsetxattr",
- "fstat",
- "fstat64",
- "fstatat64",
- "fstatfs",
- "fstatfs64",
- "fsync",
- "ftruncate",
- "ftruncate64",
- "futex",
- "futimesat",
- "get_robust_list",
- "get_thread_area",
- "getcpu",
- "getcwd",
- "getdents",
- "getdents64",
- "getegid",
- "getegid32",
- "geteuid",
- "geteuid32",
- "getgid",
- "getgid32",
- "getgroups",
- "getgroups32",
- "getitimer",
- "getpeername",
- "getpgid",
- "getpgrp",
- "getpid",
- "getppid",
- "getpriority",
- "getrandom",
- "getresgid",
- "getresgid32",
- "getresuid",
- "getresuid32",
- "getrlimit",
- "getrusage",
- "getsid",
- "getsockname",
- "getsockopt",
- "gettid",
- "gettimeofday",
- "getuid",
- "getuid32",
- "getxattr",
- "inotify_add_watch",
- "inotify_init",
- "inotify_init1",
- "inotify_rm_watch",
- "io_cancel",
- "io_destroy",
- "io_getevents",
- "io_setup",
- "io_submit",
- "ioctl",
- "ioprio_get",
- "ioprio_set",
- "ipc",
- "kill",
- "lchown",
- "lchown32",
- "lgetxattr",
- "link",
- "linkat",
- "listen",
- "listxattr",
- "llistxattr",
- "lremovexattr",
- "lseek",
- "lsetxattr",
- "lstat",
- "lstat64",
- "madvise",
- "memfd_create",
- "mincore",
- "mkdir",
- "mkdirat",
- "mknod",
- "mknodat",
- "mlock",
- "mlock2",
- "mlockall",
- "mmap",
- "mmap2",
- "mount",
- "mprotect",
- "mq_getsetattr",
- "mq_notify",
- "mq_open",
- "mq_timedreceive",
- "mq_timedsend",
- "mq_unlink",
- "mremap",
- "msgctl",
- "msgget",
- "msgrcv",
- "msgsnd",
- "msync",
- "munlock",
- "munlockall",
- "munmap",
- "name_to_handle_at",
- "nanosleep",
- "newfstatat",
- "open",
- "openat",
- "pause",
- "pipe",
- "pipe2",
- "poll",
- "ppoll",
- "prctl",
- "pread64",
- "preadv",
- "preadv2",
- "prlimit64",
- "pselect6",
- "pwrite64",
- "pwritev",
- "pwritev2",
- "read",
- "readahead",
- "readlink",
- "readlinkat",
- "readv",
- "reboot",
- "recv",
- "recvfrom",
- "recvmmsg",
- "recvmsg",
- "remap_file_pages",
- "removexattr",
- "rename",
- "renameat",
- "renameat2",
- "restart_syscall",
- "rmdir",
- "rt_sigaction",
- "rt_sigpending",
- "rt_sigprocmask",
- "rt_sigqueueinfo",
- "rt_sigreturn",
- "rt_sigsuspend",
- "rt_sigtimedwait",
- "rt_tgsigqueueinfo",
- "sched_get_priority_max",
- "sched_get_priority_min",
- "sched_getaffinity",
- "sched_getattr",
- "sched_getparam",
- "sched_getscheduler",
- "sched_rr_get_interval",
- "sched_setaffinity",
- "sched_setattr",
- "sched_setparam",
- "sched_setscheduler",
- "sched_yield",
- "seccomp",
- "select",
- "semctl",
- "semget",
- "semop",
- "semtimedop",
- "send",
- "sendfile",
- "sendfile64",
- "sendmmsg",
- "sendmsg",
- "sendto",
- "set_robust_list",
- "set_thread_area",
- "set_tid_address",
- "setfsgid",
- "setfsgid32",
- "setfsuid",
- "setfsuid32",
- "setgid",
- "setgid32",
- "setgroups",
- "setgroups32",
- "setitimer",
- "setpgid",
- "setpriority",
- "setregid",
- "setregid32",
- "setresgid",
- "setresgid32",
- "setresuid",
- "setresuid32",
- "setreuid",
- "setreuid32",
- "setrlimit",
- "setsid",
- "setsockopt",
- "setuid",
- "setuid32",
- "setxattr",
- "shmat",
- "shmctl",
- "shmdt",
- "shmget",
- "shutdown",
- "sigaltstack",
- "signalfd",
- "signalfd4",
- "sigreturn",
- "socketcall",
- "socketpair",
- "splice",
- "stat",
- "stat64",
- "statfs",
- "statfs64",
- "statx",
- "symlink",
- "symlinkat",
- "sync",
- "sync_file_range",
- "syncfs",
- "sysinfo",
- "syslog",
- "tee",
- "tgkill",
- "time",
- "timer_create",
- "timer_delete",
- "timer_getoverrun",
- "timer_gettime",
- "timer_settime",
- "timerfd_create",
- "timerfd_gettime",
- "timerfd_settime",
- "times",
- "tkill",
- "truncate",
- "truncate64",
- "ugetrlimit",
- "umask",
- "umount",
- "umount2",
- "uname",
- "unlink",
- "unlinkat",
- "unshare",
- "utime",
- "utimensat",
- "utimes",
- "vfork",
- "vmsplice",
- "wait4",
- "waitid",
- "waitpid",
- "write",
- "writev"
- ],
- "action": "SCMP_ACT_ALLOW",
- "args": [],
- "comment": "",
- "includes": {},
- "excludes": {}
- },
- {
- "names": [
- "personality"
- ],
- "action": "SCMP_ACT_ALLOW",
- "args": [
- {
- "index": 0,
- "value": 0,
- "valueTwo": 0,
- "op": "SCMP_CMP_EQ"
- }
- ],
- "comment": "",
- "includes": {},
- "excludes": {}
- },
- {
- "names": [
- "personality"
- ],
- "action": "SCMP_ACT_ALLOW",
- "args": [
- {
- "index": 0,
- "value": 8,
- "valueTwo": 0,
- "op": "SCMP_CMP_EQ"
- }
- ],
- "comment": "",
- "includes": {},
- "excludes": {}
- },
- {
- "names": [
- "personality"
- ],
- "action": "SCMP_ACT_ALLOW",
- "args": [
- {
- "index": 0,
- "value": 131072,
- "valueTwo": 0,
- "op": "SCMP_CMP_EQ"
- }
- ],
- "comment": "",
- "includes": {},
- "excludes": {}
- },
- {
- "names": [
- "personality"
- ],
- "action": "SCMP_ACT_ALLOW",
- "args": [
- {
- "index": 0,
- "value": 131080,
- "valueTwo": 0,
- "op": "SCMP_CMP_EQ"
- }
- ],
- "comment": "",
- "includes": {},
- "excludes": {}
- },
- {
- "names": [
- "personality"
- ],
- "action": "SCMP_ACT_ALLOW",
- "args": [
- {
- "index": 0,
- "value": 4294967295,
- "valueTwo": 0,
- "op": "SCMP_CMP_EQ"
- }
- ],
- "comment": "",
- "includes": {},
- "excludes": {}
- },
- {
- "names": [
- "sync_file_range2"
- ],
- "action": "SCMP_ACT_ALLOW",
- "args": [],
- "comment": "",
- "includes": {
- "arches": [
- "ppc64le"
- ]
- },
- "excludes": {}
- },
- {
- "names": [
- "arm_fadvise64_64",
- "arm_sync_file_range",
- "sync_file_range2",
- "breakpoint",
- "cacheflush",
- "set_tls"
- ],
- "action": "SCMP_ACT_ALLOW",
- "args": [],
- "comment": "",
- "includes": {
- "arches": [
- "arm",
- "arm64"
- ]
- },
- "excludes": {}
- },
- {
- "names": [
- "arch_prctl"
- ],
- "action": "SCMP_ACT_ALLOW",
- "args": [],
- "comment": "",
- "includes": {
- "arches": [
- "amd64",
- "x32"
- ]
- },
- "excludes": {}
- },
- {
- "names": [
- "modify_ldt"
- ],
- "action": "SCMP_ACT_ALLOW",
- "args": [],
- "comment": "",
- "includes": {
- "arches": [
- "amd64",
- "x32",
- "x86"
- ]
- },
- "excludes": {}
- },
- {
- "names": [
- "s390_pci_mmio_read",
- "s390_pci_mmio_write",
- "s390_runtime_instr"
- ],
- "action": "SCMP_ACT_ALLOW",
- "args": [],
- "comment": "",
- "includes": {
- "arches": [
- "s390",
- "s390x"
- ]
- },
- "excludes": {}
- },
- {
- "names": [
- "open_by_handle_at"
- ],
- "action": "SCMP_ACT_ALLOW",
- "args": [],
- "comment": "",
- "includes": {
- "caps": [
- "CAP_DAC_READ_SEARCH"
- ]
- },
- "excludes": {}
- },
- {
- "names": [
- "bpf",
- "clone",
- "fanotify_init",
- "lookup_dcookie",
- "mount",
- "name_to_handle_at",
- "perf_event_open",
- "quotactl",
- "setdomainname",
- "sethostname",
- "setns",
- "umount",
- "umount2",
- "unshare"
- ],
- "action": "SCMP_ACT_ALLOW",
- "args": [],
- "comment": "",
- "includes": {
- "caps": [
- "CAP_SYS_ADMIN"
- ]
- },
- "excludes": {}
- },
- {
- "names": [
- "clone"
- ],
- "action": "SCMP_ACT_ALLOW",
- "args": [
- {
- "index": 0,
- "value": 2080505856,
- "valueTwo": 0,
- "op": "SCMP_CMP_MASKED_EQ"
- }
- ],
- "comment": "",
- "includes": {},
- "excludes": {
- "caps": [
- "CAP_SYS_ADMIN"
- ],
- "arches": [
- "s390",
- "s390x"
- ]
- }
- },
- {
- "names": [
- "clone"
- ],
- "action": "SCMP_ACT_ALLOW",
- "args": [
- {
- "index": 1,
- "value": 2080505856,
- "valueTwo": 0,
- "op": "SCMP_CMP_MASKED_EQ"
- }
- ],
- "comment": "s390 parameter ordering for clone is different",
- "includes": {
- "arches": [
- "s390",
- "s390x"
- ]
- },
- "excludes": {
- "caps": [
- "CAP_SYS_ADMIN"
- ]
- }
- },
- {
- "names": [
- "reboot"
- ],
- "action": "SCMP_ACT_ALLOW",
- "args": [],
- "comment": "",
- "includes": {
- "caps": [
- "CAP_SYS_BOOT"
- ]
- },
- "excludes": {}
- },
- {
- "names": [
- "chroot"
- ],
- "action": "SCMP_ACT_ALLOW",
- "args": [],
- "comment": "",
- "includes": {
- "caps": [
- "CAP_SYS_CHROOT"
- ]
- },
- "excludes": {}
- },
- {
- "names": [
- "delete_module",
- "init_module",
- "finit_module",
- "query_module"
- ],
- "action": "SCMP_ACT_ALLOW",
- "args": [],
- "comment": "",
- "includes": {
- "caps": [
- "CAP_SYS_MODULE"
- ]
- },
- "excludes": {}
- },
- {
- "names": [
- "get_mempolicy",
- "mbind",
- "name_to_handle_at",
- "set_mempolicy"
- ],
- "action": "SCMP_ACT_ALLOW",
- "args": [],
- "comment": "",
- "includes": {
- "caps": [
- "CAP_SYS_NICE"
- ]
- },
- "excludes": {}
- },
- {
- "names": [
- "acct"
- ],
- "action": "SCMP_ACT_ALLOW",
- "args": [],
- "comment": "",
- "includes": {
- "caps": [
- "CAP_SYS_PACCT"
- ]
- },
- "excludes": {}
- },
- {
- "names": [
- "kcmp",
- "process_vm_readv",
- "process_vm_writev",
- "ptrace"
- ],
- "action": "SCMP_ACT_ALLOW",
- "args": [],
- "comment": "",
- "includes": {
- "caps": [
- "CAP_SYS_PTRACE"
- ]
- },
- "excludes": {}
- },
- {
- "names": [
- "iopl",
- "ioperm"
- ],
- "action": "SCMP_ACT_ALLOW",
- "args": [],
- "comment": "",
- "includes": {
- "caps": [
- "CAP_SYS_RAWIO"
- ]
- },
- "excludes": {}
- },
- {
- "names": [
- "settimeofday",
- "stime",
- "clock_settime"
- ],
- "action": "SCMP_ACT_ALLOW",
- "args": [],
- "comment": "",
- "includes": {
- "caps": [
- "CAP_SYS_TIME"
- ]
- },
- "excludes": {}
- },
- {
- "names": [
- "vhangup"
- ],
- "action": "SCMP_ACT_ALLOW",
- "args": [],
- "comment": "",
- "includes": {
- "caps": [
- "CAP_SYS_TTY_CONFIG"
- ]
- },
- "excludes": {}
- },
- {
- "names": [
- "socket"
- ],
- "action": "SCMP_ACT_ERRNO",
- "args": [
- {
- "index": 0,
- "value": 16,
- "valueTwo": 0,
- "op": "SCMP_CMP_EQ"
- },
- {
- "index": 2,
- "value": 9,
- "valueTwo": 0,
- "op": "SCMP_CMP_EQ"
- }
- ],
- "comment": "",
- "includes": {},
- "excludes": {
- "caps": [
- "CAP_AUDIT_WRITE"
- ]
- },
- "errnoRet": 22
- },
- {
- "names": [
- "socket"
- ],
- "action": "SCMP_ACT_ALLOW",
- "args": [
- {
- "index": 2,
- "value": 9,
- "valueTwo": 0,
- "op": "SCMP_CMP_NE"
- }
- ],
- "comment": "",
- "includes": {},
- "excludes": {
- "caps": [
- "CAP_AUDIT_WRITE"
- ]
- }
- },
- {
- "names": [
- "socket"
- ],
- "action": "SCMP_ACT_ALLOW",
- "args": [
- {
- "index": 0,
- "value": 16,
- "valueTwo": 0,
- "op": "SCMP_CMP_NE"
- }
- ],
- "comment": "",
- "includes": {},
- "excludes": {
- "caps": [
- "CAP_AUDIT_WRITE"
- ]
- }
- },
- {
- "names": [
- "socket"
- ],
- "action": "SCMP_ACT_ALLOW",
- "args": [
- {
- "index": 2,
- "value": 9,
- "valueTwo": 0,
- "op": "SCMP_CMP_NE"
- }
- ],
- "comment": "",
- "includes": {},
- "excludes": {
- "caps": [
- "CAP_AUDIT_WRITE"
- ]
- }
- },
- {
- "names": [
- "socket"
- ],
- "action": "SCMP_ACT_ALLOW",
- "args": null,
- "comment": "",
- "includes": {
- "caps": [
- "CAP_AUDIT_WRITE"
- ]
- },
- "excludes": {}
- }
- ]
-} \ No newline at end of file
diff --git a/vendor/github.com/seccomp/containers-golang/seccomp_default_linux.go b/vendor/github.com/seccomp/containers-golang/seccomp_default_linux.go
deleted file mode 100644
index 86c73bf99..000000000
--- a/vendor/github.com/seccomp/containers-golang/seccomp_default_linux.go
+++ /dev/null
@@ -1,744 +0,0 @@
-// +build seccomp
-
-// SPDX-License-Identifier: Apache-2.0
-
-// Copyright 2013-2018 Docker, Inc.
-
-package seccomp // import "github.com/seccomp/containers-golang"
-
-import (
- "syscall"
-
- "golang.org/x/sys/unix"
-)
-
-func arches() []Architecture {
- return []Architecture{
- {
- Arch: ArchX86_64,
- SubArches: []Arch{ArchX86, ArchX32},
- },
- {
- Arch: ArchAARCH64,
- SubArches: []Arch{ArchARM},
- },
- {
- Arch: ArchMIPS64,
- SubArches: []Arch{ArchMIPS, ArchMIPS64N32},
- },
- {
- Arch: ArchMIPS64N32,
- SubArches: []Arch{ArchMIPS, ArchMIPS64},
- },
- {
- Arch: ArchMIPSEL64,
- SubArches: []Arch{ArchMIPSEL, ArchMIPSEL64N32},
- },
- {
- Arch: ArchMIPSEL64N32,
- SubArches: []Arch{ArchMIPSEL, ArchMIPSEL64},
- },
- {
- Arch: ArchS390X,
- SubArches: []Arch{ArchS390},
- },
- }
-}
-
-// DefaultProfile defines the allowlist for the default seccomp profile.
-func DefaultProfile() *Seccomp {
- einval := uint(syscall.EINVAL)
-
- syscalls := []*Syscall{
- {
- Names: []string{
- "_llseek",
- "_newselect",
- "accept",
- "accept4",
- "access",
- "adjtimex",
- "alarm",
- "bind",
- "brk",
- "capget",
- "capset",
- "chdir",
- "chmod",
- "chown",
- "chown32",
- "clock_getres",
- "clock_gettime",
- "clock_nanosleep",
- "close",
- "connect",
- "copy_file_range",
- "creat",
- "dup",
- "dup2",
- "dup3",
- "epoll_create",
- "epoll_create1",
- "epoll_ctl",
- "epoll_ctl_old",
- "epoll_pwait",
- "epoll_wait",
- "epoll_wait_old",
- "eventfd",
- "eventfd2",
- "execve",
- "execveat",
- "exit",
- "exit_group",
- "faccessat",
- "fadvise64",
- "fadvise64_64",
- "fallocate",
- "fanotify_mark",
- "fchdir",
- "fchmod",
- "fchmodat",
- "fchown",
- "fchown32",
- "fchownat",
- "fcntl",
- "fcntl64",
- "fdatasync",
- "fgetxattr",
- "flistxattr",
- "flock",
- "fork",
- "fremovexattr",
- "fsetxattr",
- "fstat",
- "fstat64",
- "fstatat64",
- "fstatfs",
- "fstatfs64",
- "fsync",
- "ftruncate",
- "ftruncate64",
- "futex",
- "futimesat",
- "get_robust_list",
- "get_thread_area",
- "getcpu",
- "getcwd",
- "getdents",
- "getdents64",
- "getegid",
- "getegid32",
- "geteuid",
- "geteuid32",
- "getgid",
- "getgid32",
- "getgroups",
- "getgroups32",
- "getitimer",
- "getpeername",
- "getpgid",
- "getpgrp",
- "getpid",
- "getppid",
- "getpriority",
- "getrandom",
- "getresgid",
- "getresgid32",
- "getresuid",
- "getresuid32",
- "getrlimit",
- "getrusage",
- "getsid",
- "getsockname",
- "getsockopt",
- "gettid",
- "gettimeofday",
- "getuid",
- "getuid32",
- "getxattr",
- "inotify_add_watch",
- "inotify_init",
- "inotify_init1",
- "inotify_rm_watch",
- "io_cancel",
- "io_destroy",
- "io_getevents",
- "io_setup",
- "io_submit",
- "ioctl",
- "ioprio_get",
- "ioprio_set",
- "ipc",
- "kill",
- "lchown",
- "lchown32",
- "lgetxattr",
- "link",
- "linkat",
- "listen",
- "listxattr",
- "llistxattr",
- "lremovexattr",
- "lseek",
- "lsetxattr",
- "lstat",
- "lstat64",
- "madvise",
- "memfd_create",
- "mincore",
- "mkdir",
- "mkdirat",
- "mknod",
- "mknodat",
- "mlock",
- "mlock2",
- "mlockall",
- "mmap",
- "mmap2",
- "mount",
- "mprotect",
- "mq_getsetattr",
- "mq_notify",
- "mq_open",
- "mq_timedreceive",
- "mq_timedsend",
- "mq_unlink",
- "mremap",
- "msgctl",
- "msgget",
- "msgrcv",
- "msgsnd",
- "msync",
- "munlock",
- "munlockall",
- "munmap",
- "name_to_handle_at",
- "nanosleep",
- "newfstatat",
- "open",
- "openat",
- "pause",
- "pipe",
- "pipe2",
- "poll",
- "ppoll",
- "prctl",
- "pread64",
- "preadv",
- "preadv2",
- "prlimit64",
- "pselect6",
- "pwrite64",
- "pwritev",
- "pwritev2",
- "read",
- "readahead",
- "readlink",
- "readlinkat",
- "readv",
- "reboot",
- "recv",
- "recvfrom",
- "recvmmsg",
- "recvmsg",
- "remap_file_pages",
- "removexattr",
- "rename",
- "renameat",
- "renameat2",
- "restart_syscall",
- "rmdir",
- "rt_sigaction",
- "rt_sigpending",
- "rt_sigprocmask",
- "rt_sigqueueinfo",
- "rt_sigreturn",
- "rt_sigsuspend",
- "rt_sigtimedwait",
- "rt_tgsigqueueinfo",
- "sched_get_priority_max",
- "sched_get_priority_min",
- "sched_getaffinity",
- "sched_getattr",
- "sched_getparam",
- "sched_getscheduler",
- "sched_rr_get_interval",
- "sched_setaffinity",
- "sched_setattr",
- "sched_setparam",
- "sched_setscheduler",
- "sched_yield",
- "seccomp",
- "select",
- "semctl",
- "semget",
- "semop",
- "semtimedop",
- "send",
- "sendfile",
- "sendfile64",
- "sendmmsg",
- "sendmsg",
- "sendto",
- "set_robust_list",
- "set_thread_area",
- "set_tid_address",
- "setfsgid",
- "setfsgid32",
- "setfsuid",
- "setfsuid32",
- "setgid",
- "setgid32",
- "setgroups",
- "setgroups32",
- "setitimer",
- "setpgid",
- "setpriority",
- "setregid",
- "setregid32",
- "setresgid",
- "setresgid32",
- "setresuid",
- "setresuid32",
- "setreuid",
- "setreuid32",
- "setrlimit",
- "setsid",
- "setsockopt",
- "setuid",
- "setuid32",
- "setxattr",
- "shmat",
- "shmctl",
- "shmdt",
- "shmget",
- "shutdown",
- "sigaltstack",
- "signalfd",
- "signalfd4",
- "sigreturn",
- "socketcall",
- "socketpair",
- "splice",
- "stat",
- "stat64",
- "statfs",
- "statfs64",
- "statx",
- "symlink",
- "symlinkat",
- "sync",
- "sync_file_range",
- "syncfs",
- "sysinfo",
- "syslog",
- "tee",
- "tgkill",
- "time",
- "timer_create",
- "timer_delete",
- "timer_getoverrun",
- "timer_gettime",
- "timer_settime",
- "timerfd_create",
- "timerfd_gettime",
- "timerfd_settime",
- "times",
- "tkill",
- "truncate",
- "truncate64",
- "ugetrlimit",
- "umask",
- "umount",
- "umount2",
- "uname",
- "unlink",
- "unlinkat",
- "unshare",
- "utime",
- "utimensat",
- "utimes",
- "vfork",
- "vmsplice",
- "wait4",
- "waitid",
- "waitpid",
- "write",
- "writev",
- },
- Action: ActAllow,
- Args: []*Arg{},
- },
- {
- Names: []string{"personality"},
- Action: ActAllow,
- Args: []*Arg{
- {
- Index: 0,
- Value: 0x0,
- Op: OpEqualTo,
- },
- },
- },
- {
- Names: []string{"personality"},
- Action: ActAllow,
- Args: []*Arg{
- {
- Index: 0,
- Value: 0x0008,
- Op: OpEqualTo,
- },
- },
- },
- {
- Names: []string{"personality"},
- Action: ActAllow,
- Args: []*Arg{
- {
- Index: 0,
- Value: 0x20000,
- Op: OpEqualTo,
- },
- },
- },
- {
- Names: []string{"personality"},
- Action: ActAllow,
- Args: []*Arg{
- {
- Index: 0,
- Value: 0x20008,
- Op: OpEqualTo,
- },
- },
- },
- {
- Names: []string{"personality"},
- Action: ActAllow,
- Args: []*Arg{
- {
- Index: 0,
- Value: 0xffffffff,
- Op: OpEqualTo,
- },
- },
- },
- {
- Names: []string{
- "sync_file_range2",
- },
- Action: ActAllow,
- Args: []*Arg{},
- Includes: Filter{
- Arches: []string{"ppc64le"},
- },
- },
- {
- Names: []string{
- "arm_fadvise64_64",
- "arm_sync_file_range",
- "sync_file_range2",
- "breakpoint",
- "cacheflush",
- "set_tls",
- },
- Action: ActAllow,
- Args: []*Arg{},
- Includes: Filter{
- Arches: []string{"arm", "arm64"},
- },
- },
- {
- Names: []string{
- "arch_prctl",
- },
- Action: ActAllow,
- Args: []*Arg{},
- Includes: Filter{
- Arches: []string{"amd64", "x32"},
- },
- },
- {
- Names: []string{
- "modify_ldt",
- },
- Action: ActAllow,
- Args: []*Arg{},
- Includes: Filter{
- Arches: []string{"amd64", "x32", "x86"},
- },
- },
- {
- Names: []string{
- "s390_pci_mmio_read",
- "s390_pci_mmio_write",
- "s390_runtime_instr",
- },
- Action: ActAllow,
- Args: []*Arg{},
- Includes: Filter{
- Arches: []string{"s390", "s390x"},
- },
- },
- {
- Names: []string{
- "open_by_handle_at",
- },
- Action: ActAllow,
- Args: []*Arg{},
- Includes: Filter{
- Caps: []string{"CAP_DAC_READ_SEARCH"},
- },
- },
- {
- Names: []string{
- "bpf",
- "clone",
- "fanotify_init",
- "lookup_dcookie",
- "mount",
- "name_to_handle_at",
- "perf_event_open",
- "quotactl",
- "setdomainname",
- "sethostname",
- "setns",
- "umount",
- "umount2",
- "unshare",
- },
- Action: ActAllow,
- Args: []*Arg{},
- Includes: Filter{
- Caps: []string{"CAP_SYS_ADMIN"},
- },
- },
- {
- Names: []string{
- "clone",
- },
- Action: ActAllow,
- Args: []*Arg{
- {
- Index: 0,
- Value: unix.CLONE_NEWNS | unix.CLONE_NEWUTS | unix.CLONE_NEWIPC | unix.CLONE_NEWUSER | unix.CLONE_NEWPID | unix.CLONE_NEWNET,
- ValueTwo: 0,
- Op: OpMaskedEqual,
- },
- },
- Excludes: Filter{
- Caps: []string{"CAP_SYS_ADMIN"},
- Arches: []string{"s390", "s390x"},
- },
- },
- {
- Names: []string{
- "clone",
- },
- Action: ActAllow,
- Args: []*Arg{
- {
- Index: 1,
- Value: unix.CLONE_NEWNS | unix.CLONE_NEWUTS | unix.CLONE_NEWIPC | unix.CLONE_NEWUSER | unix.CLONE_NEWPID | unix.CLONE_NEWNET,
- ValueTwo: 0,
- Op: OpMaskedEqual,
- },
- },
- Comment: "s390 parameter ordering for clone is different",
- Includes: Filter{
- Arches: []string{"s390", "s390x"},
- },
- Excludes: Filter{
- Caps: []string{"CAP_SYS_ADMIN"},
- },
- },
- {
- Names: []string{
- "reboot",
- },
- Action: ActAllow,
- Args: []*Arg{},
- Includes: Filter{
- Caps: []string{"CAP_SYS_BOOT"},
- },
- },
- {
- Names: []string{
- "chroot",
- },
- Action: ActAllow,
- Args: []*Arg{},
- Includes: Filter{
- Caps: []string{"CAP_SYS_CHROOT"},
- },
- },
- {
- Names: []string{
- "delete_module",
- "init_module",
- "finit_module",
- "query_module",
- },
- Action: ActAllow,
- Args: []*Arg{},
- Includes: Filter{
- Caps: []string{"CAP_SYS_MODULE"},
- },
- },
- {
- Names: []string{
- "get_mempolicy",
- "mbind",
- "name_to_handle_at",
- "set_mempolicy",
- },
- Action: ActAllow,
- Args: []*Arg{},
- Includes: Filter{
- Caps: []string{"CAP_SYS_NICE"},
- },
- },
- {
- Names: []string{
- "acct",
- },
- Action: ActAllow,
- Args: []*Arg{},
- Includes: Filter{
- Caps: []string{"CAP_SYS_PACCT"},
- },
- },
- {
- Names: []string{
- "kcmp",
- "process_vm_readv",
- "process_vm_writev",
- "ptrace",
- },
- Action: ActAllow,
- Args: []*Arg{},
- Includes: Filter{
- Caps: []string{"CAP_SYS_PTRACE"},
- },
- },
- {
- Names: []string{
- "iopl",
- "ioperm",
- },
- Action: ActAllow,
- Args: []*Arg{},
- Includes: Filter{
- Caps: []string{"CAP_SYS_RAWIO"},
- },
- },
- {
- Names: []string{
- "settimeofday",
- "stime",
- "clock_settime",
- },
- Action: ActAllow,
- Args: []*Arg{},
- Includes: Filter{
- Caps: []string{"CAP_SYS_TIME"},
- },
- },
- {
- Names: []string{
- "vhangup",
- },
- Action: ActAllow,
- Args: []*Arg{},
- Includes: Filter{
- Caps: []string{"CAP_SYS_TTY_CONFIG"},
- },
- },
- {
- Names: []string{
- "socket",
- },
- Action: ActErrno,
- ErrnoRet: &einval,
- Args: []*Arg{
- {
- Index: 0,
- Value: syscall.AF_NETLINK,
- Op: OpEqualTo,
- },
- {
- Index: 2,
- Value: syscall.NETLINK_AUDIT,
- Op: OpEqualTo,
- },
- },
- Excludes: Filter{
- Caps: []string{"CAP_AUDIT_WRITE"},
- },
- },
- {
- Names: []string{
- "socket",
- },
- Action: ActAllow,
- Args: []*Arg{
- {
- Index: 2,
- Value: syscall.NETLINK_AUDIT,
- Op: OpNotEqual,
- },
- },
- Excludes: Filter{
- Caps: []string{"CAP_AUDIT_WRITE"},
- },
- },
- {
- Names: []string{
- "socket",
- },
- Action: ActAllow,
- Args: []*Arg{
- {
- Index: 0,
- Value: syscall.AF_NETLINK,
- Op: OpNotEqual,
- },
- },
- Excludes: Filter{
- Caps: []string{"CAP_AUDIT_WRITE"},
- },
- },
- {
- Names: []string{
- "socket",
- },
- Action: ActAllow,
- Args: []*Arg{
- {
- Index: 2,
- Value: syscall.NETLINK_AUDIT,
- Op: OpNotEqual,
- },
- },
- Excludes: Filter{
- Caps: []string{"CAP_AUDIT_WRITE"},
- },
- },
- {
- Names: []string{
- "socket",
- },
- Action: ActAllow,
- Includes: Filter{
- Caps: []string{"CAP_AUDIT_WRITE"},
- },
- },
- }
-
- return &Seccomp{
- DefaultAction: ActErrno,
- ArchMap: arches(),
- Syscalls: syscalls,
- }
-}
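
The generator deleted above, DefaultProfile(), builds a deny-by-default allow-list: a default action of ActErrno, one large allow rule for unconditional syscalls, argument-filtered rules for personality(2), and arch- or capability-gated rules expressed through Includes/Excludes filters. A minimal sketch of that rule shape follows, using trimmed local copies of the types declared in the deleted types.go further down; this is illustrative only, not the vendored source.

    package main

    import "fmt"

    type (
    	Action   string
    	Operator string
    )

    const (
    	ActAllow  Action   = "SCMP_ACT_ALLOW"
    	ActErrno  Action   = "SCMP_ACT_ERRNO"
    	OpEqualTo Operator = "SCMP_CMP_EQ"
    )

    // Arg matches one syscall argument against a value.
    type Arg struct {
    	Index uint
    	Value uint64
    	Op    Operator
    }

    // Filter conditions a rule on capabilities or architectures.
    type Filter struct {
    	Caps   []string
    	Arches []string
    }

    type Syscall struct {
    	Names    []string
    	Action   Action
    	Args     []*Arg
    	Includes Filter
    }

    type Seccomp struct {
    	DefaultAction Action
    	Syscalls      []*Syscall
    }

    func main() {
    	profile := &Seccomp{
    		// Anything not explicitly allowed fails with an errno.
    		DefaultAction: ActErrno,
    		Syscalls: []*Syscall{
    			// The bulk of the profile: one big unconditional allow-list.
    			{Names: []string{"read", "write", "exit_group"}, Action: ActAllow, Args: []*Arg{}},
    			// personality(2) is allowed only for fixed argument values;
    			// the deleted profile carries one rule per permitted value.
    			{Names: []string{"personality"}, Action: ActAllow,
    				Args: []*Arg{{Index: 0, Value: 0x0, Op: OpEqualTo}}},
    			// chroot(2) is gated on CAP_SYS_CHROOT in the bounding set.
    			{Names: []string{"chroot"}, Action: ActAllow, Args: []*Arg{},
    				Includes: Filter{Caps: []string{"CAP_SYS_CHROOT"}}},
    		},
    	}
    	fmt.Printf("%d rules, default action %s\n", len(profile.Syscalls), profile.DefaultAction)
    }
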
diff --git a/vendor/github.com/seccomp/containers-golang/seccomp_linux.go b/vendor/github.com/seccomp/containers-golang/seccomp_linux.go
deleted file mode 100644
index 44dcd90b8..000000000
--- a/vendor/github.com/seccomp/containers-golang/seccomp_linux.go
+++ /dev/null
@@ -1,191 +0,0 @@
-// +build seccomp
-
-// SPDX-License-Identifier: Apache-2.0
-
-// Copyright 2013-2018 Docker, Inc.
-
-package seccomp // import "github.com/seccomp/containers-golang"
-
-import (
- "encoding/json"
- "errors"
- "fmt"
-
- "github.com/opencontainers/runtime-spec/specs-go"
- libseccomp "github.com/seccomp/libseccomp-golang"
- "golang.org/x/sys/unix"
-)
-
-//go:generate go run -tags 'seccomp' generate.go
-
-// GetDefaultProfile returns the default seccomp profile.
-func GetDefaultProfile(rs *specs.Spec) (*specs.LinuxSeccomp, error) {
- return setupSeccomp(DefaultProfile(), rs)
-}
-
-// LoadProfile takes a json string and decodes the seccomp profile.
-func LoadProfile(body string, rs *specs.Spec) (*specs.LinuxSeccomp, error) {
- var config Seccomp
- if err := json.Unmarshal([]byte(body), &config); err != nil {
- return nil, fmt.Errorf("decoding seccomp profile failed: %v", err)
- }
- return setupSeccomp(&config, rs)
-}
-
-// LoadProfileFromBytes takes a byte slice and decodes the seccomp profile.
-func LoadProfileFromBytes(body []byte, rs *specs.Spec) (*specs.LinuxSeccomp, error) {
- config := &Seccomp{}
- if err := json.Unmarshal(body, config); err != nil {
- return nil, fmt.Errorf("decoding seccomp profile failed: %v", err)
- }
- return setupSeccomp(config, rs)
-}
-
-// LoadProfileFromConfig takes a Seccomp struct and a spec to retrieve a LinuxSeccomp
-func LoadProfileFromConfig(config *Seccomp, specgen *specs.Spec) (*specs.LinuxSeccomp, error) {
- return setupSeccomp(config, specgen)
-}
-
-var nativeToSeccomp = map[string]Arch{
- "amd64": ArchX86_64,
- "arm64": ArchAARCH64,
- "mips64": ArchMIPS64,
- "mips64n32": ArchMIPS64N32,
- "mipsel64": ArchMIPSEL64,
- "mipsel64n32": ArchMIPSEL64N32,
- "s390x": ArchS390X,
-}
-
-// inSlice tests whether a string is contained in a slice of strings.
-// Comparison is case-sensitive.
-func inSlice(slice []string, s string) bool {
- for _, ss := range slice {
- if s == ss {
- return true
- }
- }
- return false
-}
-
-func setupSeccomp(config *Seccomp, rs *specs.Spec) (*specs.LinuxSeccomp, error) {
- if config == nil {
- return nil, nil
- }
-
- // No default action specified, no syscalls listed, assume seccomp disabled
- if config.DefaultAction == "" && len(config.Syscalls) == 0 {
- return nil, nil
- }
-
- newConfig := &specs.LinuxSeccomp{}
-
- var arch string
- var native, err = libseccomp.GetNativeArch()
- if err == nil {
- arch = native.String()
- }
-
- if len(config.Architectures) != 0 && len(config.ArchMap) != 0 {
- return nil, errors.New("'architectures' and 'archMap' were specified in the seccomp profile, use either 'architectures' or 'archMap'")
- }
-
-	// if len(config.Architectures) == 0 then libseccomp will figure out the architecture to use
- if len(config.Architectures) != 0 {
- for _, a := range config.Architectures {
- newConfig.Architectures = append(newConfig.Architectures, specs.Arch(a))
- }
- }
-
- if len(config.ArchMap) != 0 {
- for _, a := range config.ArchMap {
- seccompArch, ok := nativeToSeccomp[arch]
- if ok {
- if a.Arch == seccompArch {
- newConfig.Architectures = append(newConfig.Architectures, specs.Arch(a.Arch))
- for _, sa := range a.SubArches {
- newConfig.Architectures = append(newConfig.Architectures, specs.Arch(sa))
- }
- break
- }
- }
- }
- }
-
- newConfig.DefaultAction = specs.LinuxSeccompAction(config.DefaultAction)
-
-Loop:
- // Loop through all syscall blocks and convert them to libcontainer format after filtering them
- for _, call := range config.Syscalls {
- if len(call.Excludes.Arches) > 0 {
- if inSlice(call.Excludes.Arches, arch) {
- continue Loop
- }
- }
- if len(call.Excludes.Caps) > 0 {
- for _, c := range call.Excludes.Caps {
- if inSlice(rs.Process.Capabilities.Bounding, c) {
- continue Loop
- }
- }
- }
- if len(call.Includes.Arches) > 0 {
- if !inSlice(call.Includes.Arches, arch) {
- continue Loop
- }
- }
- if len(call.Includes.Caps) > 0 {
- for _, c := range call.Includes.Caps {
- if !inSlice(rs.Process.Capabilities.Bounding, c) {
- continue Loop
- }
- }
- }
-
- if call.Name != "" && len(call.Names) != 0 {
- return nil, errors.New("'name' and 'names' were specified in the seccomp profile, use either 'name' or 'names'")
- }
-
- if call.Name != "" {
- newConfig.Syscalls = append(newConfig.Syscalls, createSpecsSyscall([]string{call.Name}, call.Action, call.Args, call.ErrnoRet))
- }
-
- if len(call.Names) > 0 {
- newConfig.Syscalls = append(newConfig.Syscalls, createSpecsSyscall(call.Names, call.Action, call.Args, call.ErrnoRet))
- }
- }
-
- return newConfig, nil
-}
-
-func createSpecsSyscall(names []string, action Action, args []*Arg, errnoRet *uint) specs.LinuxSyscall {
- newCall := specs.LinuxSyscall{
- Names: names,
- Action: specs.LinuxSeccompAction(action),
- ErrnoRet: errnoRet,
- }
-
- // Loop through all the arguments of the syscall and convert them
- for _, arg := range args {
- newArg := specs.LinuxSeccompArg{
- Index: arg.Index,
- Value: arg.Value,
- ValueTwo: arg.ValueTwo,
- Op: specs.LinuxSeccompOperator(arg.Op),
- }
-
- newCall.Args = append(newCall.Args, newArg)
- }
- return newCall
-}
-
-// IsEnabled returns true if seccomp is enabled for the host.
-func IsEnabled() bool {
- // Check if Seccomp is supported, via CONFIG_SECCOMP.
- if err := unix.Prctl(unix.PR_GET_SECCOMP, 0, 0, 0, 0); err != unix.EINVAL {
- // Make sure the kernel has CONFIG_SECCOMP_FILTER.
- if err := unix.Prctl(unix.PR_SET_SECCOMP, unix.SECCOMP_MODE_FILTER, 0, 0, 0); err != unix.EINVAL {
- return true
- }
- }
- return false
-}
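
The file removed above is the seccomp-enabled half of the package: setupSeccomp() resolves a profile against a runtime spec, dropping rules whose Includes/Excludes filters do not match the target architecture or the spec's bounding capability set, and IsEnabled() probes the kernel via prctl(2). A sketch of how a consumer called this API before the removal; it assumes the `seccomp` build tag and a tree that still vendors the package.

    package main

    import (
    	"fmt"

    	"github.com/opencontainers/runtime-spec/specs-go"
    	seccomp "github.com/seccomp/containers-golang"
    )

    func main() {
    	// The bounding set decides which capability-gated rules survive;
    	// CAP_SYS_ADMIN is deliberately absent here, so the CAP_SYS_ADMIN
    	// rule block is filtered out and the arg-filtered clone rule applies.
    	spec := &specs.Spec{
    		Process: &specs.Process{
    			Capabilities: &specs.LinuxCapabilities{
    				Bounding: []string{"CAP_SYS_CHROOT"},
    			},
    		},
    	}

    	if !seccomp.IsEnabled() {
    		fmt.Println("kernel lacks CONFIG_SECCOMP_FILTER; not applying a profile")
    		return
    	}

    	linuxSeccomp, err := seccomp.GetDefaultProfile(spec)
    	if err != nil {
    		panic(err)
    	}
    	fmt.Printf("resolved %d syscall rules\n", len(linuxSeccomp.Syscalls))

    	// Attach the resolved profile to the spec handed to the runtime.
    	spec.Linux = &specs.Linux{Seccomp: linuxSeccomp}
    }
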
diff --git a/vendor/github.com/seccomp/containers-golang/seccomp_unsupported.go b/vendor/github.com/seccomp/containers-golang/seccomp_unsupported.go
deleted file mode 100644
index 763f22982..000000000
--- a/vendor/github.com/seccomp/containers-golang/seccomp_unsupported.go
+++ /dev/null
@@ -1,45 +0,0 @@
-// +build !seccomp
-
-// SPDX-License-Identifier: Apache-2.0
-
-// Copyright 2013-2018 Docker, Inc.
-
-package seccomp // import "github.com/seccomp/containers-golang"
-
-import (
- "errors"
-
- "github.com/opencontainers/runtime-spec/specs-go"
-)
-
-var errNotSupported = errors.New("seccomp not enabled in this build")
-
-// DefaultProfile returns a nil pointer on unsupported systems.
-func DefaultProfile() *Seccomp {
- return nil
-}
-
-// LoadProfile returns an error on unsupported systems.
-func LoadProfile(body string, rs *specs.Spec) (*specs.LinuxSeccomp, error) {
- return nil, errNotSupported
-}
-
-// GetDefaultProfile returns an error on unsupported systems.
-func GetDefaultProfile(rs *specs.Spec) (*specs.LinuxSeccomp, error) {
- return nil, errNotSupported
-}
-
-// LoadProfileFromBytes takes a byte slice and decodes the seccomp profile.
-func LoadProfileFromBytes(body []byte, rs *specs.Spec) (*specs.LinuxSeccomp, error) {
- return nil, errNotSupported
-}
-
-// LoadProfileFromConfig takes a Seccomp struct and a spec to retrieve a LinuxSeccomp
-func LoadProfileFromConfig(config *Seccomp, specgen *specs.Spec) (*specs.LinuxSeccomp, error) {
- return nil, errNotSupported
-}
-
-// IsEnabled returns true if seccomp is enabled for the host.
-func IsEnabled() bool {
- return false
-}
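
The `!seccomp` stub deleted above is the standard Go build-tag fallback: the real implementation compiles only with `-tags seccomp`, and this mirror file keeps every exported symbol present, returning nil or errNotSupported so callers need no conditional code of their own. A minimal sketch of the same two-file pattern, with a hypothetical package name (featurex is not a real module):

    // featurex_enabled.go, compiled only with `go build -tags featurex`:

    // +build featurex

    package featurex

    // Supported reports that the feature was compiled in.
    func Supported() bool { return true }

    // featurex_disabled.go, compiled otherwise; same API, inert result:

    // +build !featurex

    package featurex

    // Supported reports that the feature was compiled out.
    func Supported() bool { return false }
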
diff --git a/vendor/github.com/seccomp/containers-golang/types.go b/vendor/github.com/seccomp/containers-golang/types.go
deleted file mode 100644
index 6651c423f..000000000
--- a/vendor/github.com/seccomp/containers-golang/types.go
+++ /dev/null
@@ -1,98 +0,0 @@
-package seccomp // import "github.com/seccomp/containers-golang"
-
-// SPDX-License-Identifier: Apache-2.0
-
-// Copyright 2013-2018 Docker, Inc.
-
-// Seccomp represents the config for a seccomp profile for syscall restriction.
-type Seccomp struct {
- DefaultAction Action `json:"defaultAction"`
- // Architectures is kept to maintain backward compatibility with the old
- // seccomp profile.
- Architectures []Arch `json:"architectures,omitempty"`
- ArchMap []Architecture `json:"archMap,omitempty"`
- Syscalls []*Syscall `json:"syscalls"`
-}
-
-// Architecture is used to represent a specific architecture
-// and its sub-architectures
-type Architecture struct {
- Arch Arch `json:"architecture"`
- SubArches []Arch `json:"subArchitectures"`
-}
-
-// Arch used for architectures
-type Arch string
-
-// Additional architectures permitted to be used for system calls
-// By default only the native architecture of the kernel is permitted
-const (
- ArchX86 Arch = "SCMP_ARCH_X86"
- ArchX86_64 Arch = "SCMP_ARCH_X86_64"
- ArchX32 Arch = "SCMP_ARCH_X32"
- ArchARM Arch = "SCMP_ARCH_ARM"
- ArchAARCH64 Arch = "SCMP_ARCH_AARCH64"
- ArchMIPS Arch = "SCMP_ARCH_MIPS"
- ArchMIPS64 Arch = "SCMP_ARCH_MIPS64"
- ArchMIPS64N32 Arch = "SCMP_ARCH_MIPS64N32"
- ArchMIPSEL Arch = "SCMP_ARCH_MIPSEL"
- ArchMIPSEL64 Arch = "SCMP_ARCH_MIPSEL64"
- ArchMIPSEL64N32 Arch = "SCMP_ARCH_MIPSEL64N32"
- ArchPPC Arch = "SCMP_ARCH_PPC"
- ArchPPC64 Arch = "SCMP_ARCH_PPC64"
- ArchPPC64LE Arch = "SCMP_ARCH_PPC64LE"
- ArchS390 Arch = "SCMP_ARCH_S390"
- ArchS390X Arch = "SCMP_ARCH_S390X"
-)
-
-// Action taken upon Seccomp rule match
-type Action string
-
-// Define actions for Seccomp rules
-const (
- ActKill Action = "SCMP_ACT_KILL"
- ActTrap Action = "SCMP_ACT_TRAP"
- ActErrno Action = "SCMP_ACT_ERRNO"
- ActTrace Action = "SCMP_ACT_TRACE"
- ActAllow Action = "SCMP_ACT_ALLOW"
-)
-
-// Operator used to match syscall arguments in Seccomp
-type Operator string
-
-// Define operators for syscall arguments in Seccomp
-const (
- OpNotEqual Operator = "SCMP_CMP_NE"
- OpLessThan Operator = "SCMP_CMP_LT"
- OpLessEqual Operator = "SCMP_CMP_LE"
- OpEqualTo Operator = "SCMP_CMP_EQ"
- OpGreaterEqual Operator = "SCMP_CMP_GE"
- OpGreaterThan Operator = "SCMP_CMP_GT"
- OpMaskedEqual Operator = "SCMP_CMP_MASKED_EQ"
-)
-
-// Arg used for matching specific syscall arguments in Seccomp
-type Arg struct {
- Index uint `json:"index"`
- Value uint64 `json:"value"`
- ValueTwo uint64 `json:"valueTwo"`
- Op Operator `json:"op"`
-}
-
-// Filter is used to conditionally apply Seccomp rules
-type Filter struct {
- Caps []string `json:"caps,omitempty"`
- Arches []string `json:"arches,omitempty"`
-}
-
-// Syscall is used to match a group of syscalls in Seccomp
-type Syscall struct {
- Name string `json:"name,omitempty"`
- Names []string `json:"names,omitempty"`
- Action Action `json:"action"`
- Args []*Arg `json:"args"`
- Comment string `json:"comment"`
- Includes Filter `json:"includes"`
- Excludes Filter `json:"excludes"`
- ErrnoRet *uint `json:"errnoRet,omitempty"`
-}
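
types.go, removed above, mirrors the on-disk seccomp.json schema tag for tag: the struct tags are what let LoadProfileFromBytes() decode a profile straight off disk. A minimal round-trip sketch using trimmed local copies of two of the deleted types (field names and json tags taken from the hunk above):

    package main

    import (
    	"encoding/json"
    	"fmt"
    )

    type Syscall struct {
    	Names  []string `json:"names,omitempty"`
    	Action string   `json:"action"`
    }

    type Seccomp struct {
    	DefaultAction string     `json:"defaultAction"`
    	Syscalls      []*Syscall `json:"syscalls"`
    }

    func main() {
    	body := []byte(`{
    		"defaultAction": "SCMP_ACT_ERRNO",
    		"syscalls": [
    			{"names": ["read", "write", "exit_group"], "action": "SCMP_ACT_ALLOW"}
    		]
    	}`)
    	var profile Seccomp
    	if err := json.Unmarshal(body, &profile); err != nil {
    		panic(err)
    	}
    	fmt.Printf("default=%s rules=%d\n", profile.DefaultAction, len(profile.Syscalls))
    }
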
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 15291feda..ffd90f5a5 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -66,10 +66,11 @@ github.com/containernetworking/plugins/pkg/utils/hwaddr
github.com/containernetworking/plugins/pkg/utils/sysctl
github.com/containernetworking/plugins/plugins/ipam/host-local/backend
github.com/containernetworking/plugins/plugins/ipam/host-local/backend/allocator
-# github.com/containers/buildah v1.15.1-0.20200813183340-0a8dc1f8064c
+# github.com/containers/buildah v1.16.1
github.com/containers/buildah
github.com/containers/buildah/bind
github.com/containers/buildah/chroot
+github.com/containers/buildah/copier
github.com/containers/buildah/docker
github.com/containers/buildah/imagebuildah
github.com/containers/buildah/manifests
@@ -80,6 +81,7 @@ github.com/containers/buildah/pkg/formats
github.com/containers/buildah/pkg/manifests
github.com/containers/buildah/pkg/overlay
github.com/containers/buildah/pkg/parse
+github.com/containers/buildah/pkg/rusage
github.com/containers/buildah/pkg/secrets
github.com/containers/buildah/pkg/supplemented
github.com/containers/buildah/pkg/umask
@@ -486,8 +488,6 @@ github.com/rootless-containers/rootlesskit/pkg/port/builtin/parent/udp/udpproxy
github.com/rootless-containers/rootlesskit/pkg/port/portutil
# github.com/safchain/ethtool v0.0.0-20190326074333-42ed695e3de8
github.com/safchain/ethtool
-# github.com/seccomp/containers-golang v0.6.0
-github.com/seccomp/containers-golang
# github.com/seccomp/libseccomp-golang v0.9.2-0.20200616122406-847368b35ebf
github.com/seccomp/libseccomp-golang
# github.com/sirupsen/logrus v1.6.0
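
The modules.txt hunk records the net effect of this vendor update: buildah is pinned at v1.16.1, its new copier and rusage packages are vendored, and the direct github.com/seccomp/containers-golang dependency disappears because nothing imports it anymore. A change of this shape typically comes from standard module tooling; the exact commands used are not recorded in the diff, but a plausible sequence is:

    $ go get github.com/containers/buildah@v1.16.1
    $ go mod tidy      # drops the now-unreferenced seccomp/containers-golang requirement
    $ go mod vendor    # rewrites vendor/ and vendor/modules.txt to match go.mod
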