summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--API.md2
-rw-r--r--RELEASE_NOTES.md91
-rw-r--r--cmd/podman/auto-update.go2
-rw-r--r--cmd/podman/containers/create.go2
-rw-r--r--cmd/podman/containers/ps.go2
-rw-r--r--cmd/podman/images/build.go3
-rw-r--r--cmd/podman/root.go1
-rw-r--r--contrib/cirrus/README.md2
-rw-r--r--contrib/cirrus/lib.sh2
-rw-r--r--docs/source/markdown/podman-build.1.md26
-rw-r--r--docs/source/markdown/podman-generate-systemd.1.md2
-rw-r--r--docs/source/markdown/podman.1.md12
-rw-r--r--go.mod4
-rw-r--r--go.sum13
-rwxr-xr-xhack/get_ci_vm.sh2
-rw-r--r--libpod/container_exec.go2
-rw-r--r--libpod/container_internal.go2
-rw-r--r--libpod/define/errors.go4
-rw-r--r--libpod/define/pod_inspect.go2
-rw-r--r--libpod/image/df.go126
-rw-r--r--libpod/info.go2
-rw-r--r--libpod/oci_conmon_linux.go25
-rw-r--r--libpod/oci_conmon_unsupported.go2
-rw-r--r--libpod/options.go13
-rw-r--r--libpod/runtime.go5
-rw-r--r--pkg/api/handlers/compat/events.go2
-rw-r--r--pkg/api/server/register_play.go2
-rw-r--r--pkg/auth/auth.go2
-rw-r--r--pkg/bindings/test/system_test.go2
-rw-r--r--pkg/domain/entities/engine.go1
-rw-r--r--pkg/domain/entities/play.go2
-rw-r--r--pkg/domain/infra/abi/manifest.go2
-rw-r--r--pkg/domain/infra/abi/system.go70
-rw-r--r--pkg/domain/infra/runtime_libpod.go8
-rw-r--r--pkg/network/netconflist_test.go2
-rw-r--r--pkg/network/network.go2
-rw-r--r--pkg/spec/spec.go2
-rw-r--r--pkg/specgen/generate/storage.go2
-rw-r--r--pkg/util/utils.go24
-rw-r--r--pkg/util/utils_linux_test.go2
-rw-r--r--pkg/varlink/io.podman.varlink2
-rw-r--r--pkg/varlinkapi/container.go4
-rw-r--r--test/e2e/containers_conf_test.go4
-rw-r--r--test/e2e/prune_test.go2
-rw-r--r--test/e2e/ps_test.go2
-rw-r--r--test/python/dockerpy/tests/test_images.py2
-rw-r--r--test/system/030-run.bats6
-rw-r--r--test/system/070-build.bats21
-rw-r--r--test/utils/common_function_test.go4
-rw-r--r--troubleshooting.md33
-rw-r--r--vendor/github.com/containers/buildah/.cirrus.yml6
-rw-r--r--vendor/github.com/containers/buildah/CHANGELOG.md112
-rw-r--r--vendor/github.com/containers/buildah/add.go635
-rw-r--r--vendor/github.com/containers/buildah/buildah.go2
-rw-r--r--vendor/github.com/containers/buildah/changelog.txt113
-rw-r--r--vendor/github.com/containers/buildah/commit.go12
-rw-r--r--vendor/github.com/containers/buildah/common.go79
-rw-r--r--vendor/github.com/containers/buildah/copier/copier.go1526
-rw-r--r--vendor/github.com/containers/buildah/copier/syscall_unix.go79
-rw-r--r--vendor/github.com/containers/buildah/copier/syscall_windows.go83
-rw-r--r--vendor/github.com/containers/buildah/copier/unwrap_112.go11
-rw-r--r--vendor/github.com/containers/buildah/copier/unwrap_113.go18
-rw-r--r--vendor/github.com/containers/buildah/copier/xattrs.go92
-rw-r--r--vendor/github.com/containers/buildah/copier/xattrs_unsupported.go15
-rw-r--r--vendor/github.com/containers/buildah/digester.go15
-rw-r--r--vendor/github.com/containers/buildah/go.mod10
-rw-r--r--vendor/github.com/containers/buildah/go.sum21
-rw-r--r--vendor/github.com/containers/buildah/image.go65
-rw-r--r--vendor/github.com/containers/buildah/imagebuildah/build.go8
-rw-r--r--vendor/github.com/containers/buildah/imagebuildah/executor.go26
-rw-r--r--vendor/github.com/containers/buildah/imagebuildah/stage_executor.go548
-rw-r--r--vendor/github.com/containers/buildah/pkg/cli/common.go9
-rw-r--r--vendor/github.com/containers/buildah/pkg/rusage/rusage.go48
-rw-r--r--vendor/github.com/containers/buildah/pkg/rusage/rusage_unix.go35
-rw-r--r--vendor/github.com/containers/buildah/pkg/rusage/rusage_unsupported.go18
-rw-r--r--vendor/github.com/containers/buildah/pull.go2
-rw-r--r--vendor/github.com/containers/buildah/run_linux.go15
-rw-r--r--vendor/github.com/containers/buildah/seccomp.go2
-rw-r--r--vendor/github.com/containers/buildah/selinux.go4
-rw-r--r--vendor/github.com/containers/buildah/selinux_unsupported.go4
-rw-r--r--vendor/github.com/containers/buildah/util.go287
-rw-r--r--vendor/github.com/containers/common/pkg/config/config.go4
-rw-r--r--vendor/github.com/containers/common/pkg/seccomp/default_linux.go (renamed from vendor/github.com/containers/common/pkg/seccomp/seccomp_default_linux.go)6
-rw-r--r--vendor/github.com/containers/common/pkg/seccomp/seccomp.json8
-rw-r--r--vendor/github.com/containers/common/version/version.go2
-rw-r--r--vendor/github.com/seccomp/containers-golang/.gitignore2
-rw-r--r--vendor/github.com/seccomp/containers-golang/LICENSE190
-rw-r--r--vendor/github.com/seccomp/containers-golang/Makefile32
-rw-r--r--vendor/github.com/seccomp/containers-golang/README.md29
-rw-r--r--vendor/github.com/seccomp/containers-golang/conversion.go32
-rw-r--r--vendor/github.com/seccomp/containers-golang/go.mod16
-rw-r--r--vendor/github.com/seccomp/containers-golang/go.sum66
-rw-r--r--vendor/github.com/seccomp/containers-golang/seccomp.json878
-rw-r--r--vendor/github.com/seccomp/containers-golang/seccomp_default_linux.go744
-rw-r--r--vendor/github.com/seccomp/containers-golang/seccomp_linux.go191
-rw-r--r--vendor/github.com/seccomp/containers-golang/seccomp_unsupported.go45
-rw-r--r--vendor/github.com/seccomp/containers-golang/types.go98
-rw-r--r--vendor/modules.txt8
98 files changed, 3326 insertions, 3479 deletions
diff --git a/API.md b/API.md
index 809bc2b2e..831367d8e 100644
--- a/API.md
+++ b/API.md
@@ -1900,7 +1900,7 @@ insecure [[]string](#[]string)
blocked [[]string](#[]string)
### <a name="InfoStore"></a>type InfoStore
-InfoStore describes the host's storage informatoin
+InfoStore describes the host's storage information
containers [int](https://godoc.org/builtin#int)
diff --git a/RELEASE_NOTES.md b/RELEASE_NOTES.md
index 8bd23bfd0..842aac531 100644
--- a/RELEASE_NOTES.md
+++ b/RELEASE_NOTES.md
@@ -1,5 +1,96 @@
# Release Notes
+## 2.1.0
+### Features
+- A new command, `podman image mount`, has been added. This allows for an image to be mounted, read-only, to inspect its contents without creating a container from it ([#1433](https://github.com/containers/podman/issues/1433)).
+- The `podman save` and `podman load` commands can now create and load archives containing multiple images ([#2669](https://github.com/containers/podman/issues/2669)).
+- Rootless Podman now supports all `podman network` commands, and rootless containers can now be joined to networks.
+- The performance of `podman build` on `ADD` and `COPY` instructions has been greatly improved, especially when a `.dockerignore` is present.
+- The `podman run` and `podman create` commands now support a new mode for the `--cgroups` option, `--cgroups=split`. Podman will create two cgroups under the cgroup it was launched in, one for the container and one for Conmon. This mode is useful for running Podman in a systemd unit, as it ensures that all processes are retained in systemd's cgroup hierarchy ([#6400](https://github.com/containers/podman/issues/6400)).
+- The `podman run` and `podman create` commands can now specify options to slirp4netns by using the `--network` option as follows: `--net slirp4netns:opt1,opt2`. This allows for, among other things, switching the port forwarder used by slirp4netns away from rootlessport.
+- The `podman ps` command now features a new option, `--storage`, to show containers from Buildah, CRI-O and other applications.
+- The `podman run` and `podman create` commands now feature a `--sdnotify` option to control the behavior of systemd's sdnotify with containers, enabling improved support for Podman in `Type=notify` units.
+- The `podman run` command now features a `--preserve-fds` option to pass file descriptors from the host into the container ([#6458](https://github.com/containers/podman/issues/6458)).
+- The `podman run` and `podman create` commands can now create overlay volume mounts, by adding the `:O` option to a bind mount (e.g. `-v /test:/test:O`). Overlay volume mounts will mount a directory into a container from the host and allow changes to it, but not write those changes back to the directory on the host.
+- The `podman play kube` command now supports the Socket HostPath type ([#7112](https://github.com/containers/podman/issues/7112)).
+- The `podman play kube` command now supports read-only mounts.
+- The `podman play kube` command now properly handles `HostAlias` entries.
+- The `podman generate kube` command now adds entries to `/etc/hosts` from `--host-add` generated YAML as `HostAlias` entries.
+- The `podman play kube` and `podman generate kube` commands now properly support `shareProcessNamespace` to share the PID namespace in pods.
+- The `podman volume ls` command now supports the `dangling` filter to identify volumes that are dangling (not attached to any container).
+- The `podman run` and `podman create` commands now feature a `--umask` option to set the umask of the created container.
+- The `podman create` and `podman run` commands now feature a `--tz` option to set the timezone within the container ([#5128](https://github.com/containers/podman/issues/5128)).
+- Environment variables for Podman can now be added in the `containers.conf` configuration file.
+- The `--mount` option of `podman run` and `podman create` now supports a new mount type, `type=devpts`, to add a `devpts` mount to the container. This is useful for containers that want to mount `/dev/` from the host into the container, but still create a terminal.
+- The `--security-opt` flag to `podman run` and `podman create` now supports a new option, `proc-opts`, to specify options for the container's `/proc` filesystem.
+- Podman with the `crun` OCI runtime now supports a new option to `podman run` and `podman create`, `--cgroup-conf`, which allows for advanced configuration of cgroups on cgroups v2 systems.
+- The `podman create` and `podman run` commands now support a `--override-variant` option, to override the architecture variant of the image that will be pulled and ran.
+- A new global option has been added to Podman, `--runtime-flags`, which allows for setting flags to use when the OCI runtime is called.
+- The `podman manifest add` command now supports the `--cert-dir`, `--auth-file`, `--creds`, and `--tls-verify` options.
+
+### Changes
+- Podman will now retry pulling an image 3 times if a pull fails due to network errors.
+- The `podman exec` command would previously print error messages (e.g. `exec session exited with non-zero exit code -1`) when the command run exited with a non-0 exit code. It no longer does this. The `podman exec` command will still exit with the same exit code as the command run in the container did.
+- Error messages when creating a container or pod with a name that is already in use have been improved.
+- For read-only containers running systemd init, Podman creates a tmpfs filesystem at `/run`. This was previously limited to 65k in size and mounted `noexec`, but is now unlimited size and mounted `exec`.
+- The `podman system reset` command no longer removes configuration files for rootless Podman.
+
+### Bugfixes
+- Fixed a bug where Podman would not add an entry to `/etc/hosts` for a container if it joined another container's network namespace ([#6678](https://github.com/containers/podman/issues/6678)).
+- Fixed a bug where `podman save --format oci-dir` saved the image in an incorrect format ([#6544](https://github.com/containers/podman/issues/6544)).
+- Fixed a bug where privileged containers would still configure an AppArmor profile.
+- Fixed a bug where the `--format` option of `podman system df` was not properly interpreting format codes that included backslashes ([#7149](https://github.com/containers/podman/issues/7149)).
+- Fixed a bug where rootless Podman would ignore errors from `newuidmap` and `newgidmap`, even if `/etc/subuid` and `/etc/subgid` contained valid mappings for the user running Podman.
+- Fixed a bug where the `podman commit` command did not properly handle single-character image names ([#7114](https://github.com/containers/podman/issues/7114)).
+- Fixed a bug where the output of `podman ps --format=json` did not include a `Status` field ([#6980](https://github.com/containers/podman/issues/6980)).
+- Fixed a bug where input to the `--log-level` option was no longer case-insensitive.
+- Fixed a bug where `podman images` could segfault when an image pull was aborted while incomplete, leaving an image without a manifest ([#7444](https://github.com/containers/podman/issues/7444)).
+- Fixed a bug where rootless Podman would try to create the `~/.config` directory when it did not exist, despite not placing any configuration files inside the directory.
+- Fixed a bug where the output of `podman system df` was inconsistent based on whether the `-v` option was specified ([#7405](https://github.com/containers/podman/issues/7405)).
+- Fixed a bug where `--security-opt apparmor=unconfined` would error if Apparmor was not enabled on the system ([#7545](https://github.com/containers/podman/issues/7545)).
+- Fixed a bug where running `podman stop` on multiple containers starting with `--rm` could sometimes cause `no such container` errors ([#7384](https://github.com/containers/podman/issues/7384)).
+- Fixed a bug where `podman-remote` would still try to contact the server when displaying help information about subcommands.
+- Fixed a bug where the `podman build --logfile` command would segfault.
+- Fixed a bug where the `podman generate systemd` command did not properly handle containers which were created with a name given as `--name=$NAME` instead of `--name $NAME` ([#7157](https://github.com/containers/podman/issues/7157)).
+- Fixed a bug where the `podman ps` command was ignoring the `--latest` flag.
+- Fixed a bug where the `podman-remote kill` command would hang when a signal that did not kill the container was specified ([#7135](https://github.com/containers/podman/issues/7135)).
+- Fixed a bug where the `--oom-score-adj` option of `podman run` and `podman create` was nonfunctional.
+- Fixed a bug where the `--display` option of `podman runlabel` was nonfunctional.
+- Fixed a bug where the `podman runlabel` command would not pull images that did not exist locally on the system.
+- Fixed a bug where `podman-remote run` would not exit with the correct code when the container was removed by a `podman-remote rm -f` while `podman-remote run` was still running ([#7117](https://github.com/containers/podman/issues/7117)).
+- Fixed a bug where the `podman-remote run --rm` command would error attempting to remove containers that had already been removed (e.g. by `podman-remote rm --force`) ([#7340](https://github.com/containers/podman/issues/7340)).
+- Fixed a bug where `podman --user` with a numeric user and `podman run --userns=keepid` could create users in `/etc/passwd` in the container that belong to groups without a corresponding entry in `/etc/group` ([#7389](https://github.com/containers/podman/issues/7389)).
+- Fixed a bug where `podman run --userns=keepid` could create entries in `/etc/passwd` with a UID that was already in use by another user ([#7503](https://github.com/containers/podman/issues/7503)).
+- Fixed a bug where `podman --user` with a numeric user and `podman run --userns=keepid` could create users that could not be logged into ([#7499](https://github.com/containers/podman/issues/7499)).
+- Fixed a bug where trying to join another container's user namespace with `--userns container:$ID` would fail ([#7547](https://github.com/containers/podman/issues/7547)).
+- Fixed a bug where the `podman play kube` command would trim underscores from container names ([#7020](https://github.com/containers/podman/issues/7020)).
+- Fixed a bug where the `podman attach` command would not show output when attaching to a container with a terminal ([#6253](https://github.com/containers/podman/issues/6253)).
+- Fixed a bug where the `podman system df` command could be extremely slow when large quantities of images were present ([#7406](https://github.com/containers/podman/issues/7406)).
+
+### API
+- Docker-compatible Volume Endpoints (Create, Inspect, List, Remove, Prune) are now available!
+- Added an endpoint for generating systemd unit files for containers.
+- The `last` parameter to the Libpod container list endpoint now has an alias, `limit` ([#6413](https://github.com/containers/podman/issues/6413)).
+- The Libpod image list API now returns timestamps in Unix format, as integers, as opposed to strings.
+- The Compat Inspect endpoint for containers now includes port information in NetworkSettings.
+- The Compat List endpoint for images now features limited support for the (deprecated) `filter` query parameter ([#6797](https://github.com/containers/podman/issues/6797)).
+- Fixed a bug where the Compat Create endpoint for containers was not correctly handling bind mounts.
+- Fixed a bug where the Compat Create endpoint for containers would not return a 404 when the requested image was not present.
+- Fixed a bug where the Compat Create endpoint for containers did not properly handle Entrypoint and Command from images.
+- Fixed a bug where name history information was not properly added in the Libpod Image List endpoint.
+- Fixed a bug where the Libpod image search endpoint improperly populated the Description field of responses.
+- Added a `noTrunc` option to the Libpod image search endpoint.
+- Fixed a bug where the Pod List API would return null, instead of an empty array, when no pods were present ([#7392](https://github.com/containers/podman/issues/7392)).
+- Fixed a bug where endpoints that hijacked would perform the hijack too early, before being ready to send and receive data ([#7195](https://github.com/containers/podman/issues/7195)).
+- Fixed a bug where Pod endpoints that can operate on multiple containers at once (e.g. Kill, Pause, Unpause, Stop) would not forward errors from individual containers that failed.
+- The Compat List endpoint for networks now supports filtering results ([#7462](https://github.com/containers/podman/issues/7462)).
+- Fixed a bug where the Top endpoint for pods would return both a 500 and 404 when run on a non-existent pod.
+
+### Misc
+- Updated Buildah to v1.16.1
+- Updated the containers/storage library to v1.23.5
+- Updated the containers/common library to v0.22.0
+
## 2.0.6
### Bugfixes
- Fixed a bug where running systemd in a container on a cgroups v1 system would fail.
diff --git a/cmd/podman/auto-update.go b/cmd/podman/auto-update.go
index 8e17b49e0..677266c83 100644
--- a/cmd/podman/auto-update.go
+++ b/cmd/podman/auto-update.go
@@ -41,7 +41,7 @@ func init() {
func autoUpdate(cmd *cobra.Command, args []string) error {
if len(args) > 0 {
- // Backwards compat. System tests expext this error string.
+ // Backwards compat. System tests expect this error string.
return errors.Errorf("`%s` takes no arguments", cmd.CommandPath())
}
report, failures := registry.ContainerEngine().AutoUpdate(registry.GetContext(), autoUpdateOptions)
diff --git a/cmd/podman/containers/create.go b/cmd/podman/containers/create.go
index 5e48aa622..f9d33a223 100644
--- a/cmd/podman/containers/create.go
+++ b/cmd/podman/containers/create.go
@@ -235,7 +235,7 @@ func pullImage(imageName string) (string, error) {
imageRef, err := alltransports.ParseImageName(imageName)
switch {
case err != nil:
- // Assume we specified a local image withouth the explicit storage transport.
+ // Assume we specified a local image without the explicit storage transport.
fallthrough
case imageRef.Transport().Name() == storage.Transport.Name():
diff --git a/cmd/podman/containers/ps.go b/cmd/podman/containers/ps.go
index 2aa3b3a9b..a78b35c08 100644
--- a/cmd/podman/containers/ps.go
+++ b/cmd/podman/containers/ps.go
@@ -414,7 +414,7 @@ func portsToString(ports []ocicni.PortMapping) string {
continue
}
}
- // For each portMapKey, format group list and appned to output string.
+ // For each portMapKey, format group list and append to output string.
for _, portKey := range groupKeyList {
group := portGroupMap[portKey]
portDisplay = append(portDisplay, formatGroup(portKey, group.first, group.last))
diff --git a/cmd/podman/images/build.go b/cmd/podman/images/build.go
index 923109b15..ff5c6ec09 100644
--- a/cmd/podman/images/build.go
+++ b/cmd/podman/images/build.go
@@ -386,6 +386,9 @@ func buildFlagsWrapperToOptions(c *cobra.Command, contextDir string, flags *buil
}
containerConfig := registry.PodmanConfig()
+ for _, arg := range containerConfig.RuntimeFlags {
+ runtimeFlags = append(runtimeFlags, "--"+arg)
+ }
if containerConfig.Engine.CgroupManager == config.SystemdCgroupsManager {
runtimeFlags = append(runtimeFlags, "--systemd-cgroup")
}
diff --git a/cmd/podman/root.go b/cmd/podman/root.go
index 6cf369f0a..60725b111 100644
--- a/cmd/podman/root.go
+++ b/cmd/podman/root.go
@@ -273,6 +273,7 @@ func rootFlags(cmd *cobra.Command, opts *entities.PodmanConfig) {
pFlags.StringVar(&opts.RegistriesConf, "registries-conf", "", "Path to a registries.conf to use for image processing")
pFlags.StringVar(&opts.Runroot, "runroot", "", "Path to the 'run directory' where all state information is stored")
pFlags.StringVar(&opts.RuntimePath, "runtime", "", "Path to the OCI-compatible binary used to run containers, default is /usr/bin/runc")
+ pFlags.StringArrayVar(&opts.RuntimeFlags, "runtime-flag", []string{}, "add global flags for the container runtime")
// -s is deprecated due to conflict with -s on subcommands
pFlags.StringVar(&opts.StorageDriver, "storage-driver", "", "Select which storage driver is used to manage storage of images and containers (default is overlay)")
pFlags.StringArrayVar(&opts.StorageOpts, "storage-opt", []string{}, "Used to pass an option to the storage driver")
diff --git a/contrib/cirrus/README.md b/contrib/cirrus/README.md
index f66560cc8..4056edb6a 100644
--- a/contrib/cirrus/README.md
+++ b/contrib/cirrus/README.md
@@ -116,7 +116,7 @@ gsutil cors get gs://libpod-master-releases
To function properly (allow client "trust" of content from `storage.googleapis.com`) the followiing
metadata JSON should be used. Following the JSON, is an example of the command used to set this
metadata on the libpod-master-releases bucket. For additional information about configuring CORS
-please referr to [the google-storage documentation](https://cloud.google.com/storage/docs/configuring-cors).
+please refer to [the google-storage documentation](https://cloud.google.com/storage/docs/configuring-cors).
```JSON
[
diff --git a/contrib/cirrus/lib.sh b/contrib/cirrus/lib.sh
index f125dd76d..0dbb57ab3 100644
--- a/contrib/cirrus/lib.sh
+++ b/contrib/cirrus/lib.sh
@@ -235,7 +235,7 @@ setup_rootless() {
useradd -g $ROOTLESS_GID -u $ROOTLESS_UID --no-user-group --create-home $ROOTLESS_USER
chown -R $ROOTLESS_USER:$ROOTLESS_USER "$GOPATH" "$GOSRC"
- echo "creating ssh keypair for $USER"
+ echo "creating ssh key pair for $USER"
[[ -r "$HOME/.ssh/id_rsa" ]] || \
ssh-keygen -P "" -f "$HOME/.ssh/id_rsa"
diff --git a/docs/source/markdown/podman-build.1.md b/docs/source/markdown/podman-build.1.md
index c38424a11..7d0aa5001 100644
--- a/docs/source/markdown/podman-build.1.md
+++ b/docs/source/markdown/podman-build.1.md
@@ -353,15 +353,6 @@ another process.
Do not use existing cached images for the container build. Build from the start with a new set of cached layers.
-**--omit-timestamp** *bool-value*
-
-Set the create timestamp to epoch 0 to allow for deterministic builds (defaults to false).
-By default, the created timestamp is changed and written into the image manifest with every commit,
-causing the image's sha256 hash to be different even if the sources are exactly the same otherwise.
-When --omit-timestamp is set to true, the created timestamp is always set to the epoch and therefore not
-changed, allowing the image's sha256 to remain the same. All files committed to the layers of the image
-will get the epoch 0 timestamp.
-
**--os**=*string*
Set the OS to the provided value instead of the current operating system of the host.
@@ -422,16 +413,6 @@ commands specified by the **RUN** instruction.
Note: You can also override the default runtime by setting the BUILDAH\_RUNTIME
environment variable. `export BUILDAH_RUNTIME=/usr/local/bin/runc`
-**--runtime-flag**=*flag*
-
-Adds global flags for the container runtime. To list the supported flags, please
-consult the manpages of the selected container runtime (`runc` is the default
-runtime, the manpage to consult is `runc(8)`. When the machine is configured
-for cgroup V2, the default runtime is `crun`, the manpage to consult is `crun(8)`.).
-
-Note: Do not pass the leading `--` to the flag. To pass the runc flag `--log-format json`
-to podman build, the option given would be `--runtime-flag log-format=json`.
-
**--security-opt**=*option*
Security Options
@@ -480,6 +461,13 @@ Set the target build stage to build. When building a Containerfile with multipl
can be used to specify an intermediate build stage by name as the final stage for the resulting image.
Commands after the target stage will be skipped.
+**--timestamp** *seconds*
+
+Set the create timestamp to seconds since epoch to allow for deterministic builds (defaults to current time).
+By default, the created timestamp is changed and written into the image manifest with every commit,
+causing the image's sha256 hash to be different even if the sources are exactly the same otherwise.
+When --timestamp is set, the created timestamp is always set to the time specified and therefore not changed, allowing the image's sha256 to remain the same. All files committed to the layers of the image will be created with the timestamp.
+
**--tls-verify**=*true|false*
Require HTTPS and verify certificates when talking to container registries (defaults to true). (Not available for remote commands)
diff --git a/docs/source/markdown/podman-generate-systemd.1.md b/docs/source/markdown/podman-generate-systemd.1.md
index 2ee290f0f..af8ea3c39 100644
--- a/docs/source/markdown/podman-generate-systemd.1.md
+++ b/docs/source/markdown/podman-generate-systemd.1.md
@@ -51,7 +51,7 @@ Set the systemd unit name prefix for pods. The default is *pod*.
**--separator**=*separator*
-Set the systemd unit name seperator between the name/id of a container/pod and the prefix. The default is *-*.
+Set the systemd unit name separator between the name/id of a container/pod and the prefix. The default is *-*.
## Examples
diff --git a/docs/source/markdown/podman.1.md b/docs/source/markdown/podman.1.md
index c53da6b5f..2dc6b13bf 100644
--- a/docs/source/markdown/podman.1.md
+++ b/docs/source/markdown/podman.1.md
@@ -99,6 +99,16 @@ Default state dir configured in `/etc/containers/storage.conf`.
Name of the OCI runtime as specified in containers.conf or absolute path to the OCI compatible binary used to run containers.
+**--runtime-flag**=*flag*
+
+Adds global flags for the container runtime. To list the supported flags, please
+consult the manpages of the selected container runtime (`runc` is the default
+runtime, the manpage to consult is `runc(8)`. When the machine is configured
+for cgroup V2, the default runtime is `crun`, the manpage to consult is `crun(8)`.).
+
+Note: Do not pass the leading `--` to the flag. To pass the runc flag `--log-format json`
+to podman build, the option given would be `--runtime-flag log-format=json`.
+
**--storage-driver**=*value*
Storage driver. The default storage driver for UID 0 is configured in /etc/containers/storage.conf (`$HOME/.config/containers/storage.conf` in rootless mode), and is *vfs* for non-root users when *fuse-overlayfs* is not available. The `STORAGE_DRIVER` environment variable overrides the default. The --storage-driver specified driver overrides all.
@@ -285,7 +295,7 @@ The Network File System (NFS) and other distributed file systems (for example: L
For more information, please refer to the [Podman Troubleshooting Page](https://github.com/containers/podman/blob/master/troubleshooting.md).
## SEE ALSO
-`containers-mounts.conf(5)`, `containers-registries.conf(5)`, `containers-storage.conf(5)`, `buildah(1)`, `containers.conf(5)`, `oci-hooks(5)`, `containers-policy.json(5)`, `subuid(5)`, `subgid(5)`, `slirp4netns(1)`
+`containers-mounts.conf(5)`, `containers-registries.conf(5)`, `containers-storage.conf(5)`, `buildah(1)`, `containers.conf(5)`, `oci-hooks(5)`, `containers-policy.json(5)`, `crun(8)`, `runc(8)`, `subuid(5)`, `subgid(5)`, `slirp4netns(1)`
## HISTORY
Dec 2016, Originally compiled by Dan Walsh <dwalsh@redhat.com>
diff --git a/go.mod b/go.mod
index a4a7aa41f..6b40075be 100644
--- a/go.mod
+++ b/go.mod
@@ -10,8 +10,8 @@ require (
github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd // indirect
github.com/containernetworking/cni v0.8.0
github.com/containernetworking/plugins v0.8.7
- github.com/containers/buildah v1.15.1-0.20200813183340-0a8dc1f8064c
- github.com/containers/common v0.21.0
+ github.com/containers/buildah v1.16.1
+ github.com/containers/common v0.22.0
github.com/containers/conmon v2.0.20+incompatible
github.com/containers/image/v5 v5.5.2
github.com/containers/psgo v1.5.1
diff --git a/go.sum b/go.sum
index 9bb058dc8..7eb8fe654 100644
--- a/go.sum
+++ b/go.sum
@@ -68,11 +68,11 @@ github.com/containernetworking/cni v0.8.0 h1:BT9lpgGoH4jw3lFC7Odz2prU5ruiYKcgAjM
github.com/containernetworking/cni v0.8.0/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY=
github.com/containernetworking/plugins v0.8.7 h1:bU7QieuAp+sACI2vCzESJ3FoT860urYP+lThyZkb/2M=
github.com/containernetworking/plugins v0.8.7/go.mod h1:R7lXeZaBzpfqapcAbHRW8/CYwm0dHzbz0XEjofx0uB0=
-github.com/containers/buildah v1.15.1-0.20200813183340-0a8dc1f8064c h1:elGbJcB3UjBdk7fBxfAzUNS3IT288U1Dzm0gmhgsnB8=
-github.com/containers/buildah v1.15.1-0.20200813183340-0a8dc1f8064c/go.mod h1:+IklBLPix5wxPEWn26aDay5f5q4A5VtmNjkdyK5YVsI=
-github.com/containers/common v0.19.0/go.mod h1:+NUHV8V5Kmo260ja9Dxtr8ialrDnK4RNzyeEbSgmLac=
-github.com/containers/common v0.21.0 h1:v2U9MrGw0vMgefQf0/uJYBsSnengxLbSORYqhCVEBs0=
+github.com/containers/buildah v1.16.1 h1:kxxZbW0in7cFv/AEQtSPNQ06aemYN5fsya31IS9xd2g=
+github.com/containers/buildah v1.16.1/go.mod h1:i1XqXgpCROnfcq4oNtfrFEk7UzNDxLJ/PZ+CnPyoIq8=
github.com/containers/common v0.21.0/go.mod h1:8w8SVwc+P2p1MOnRMbSKNWXt1Iwd2bKFu2LLZx55DTM=
+github.com/containers/common v0.22.0 h1:MjJIMka4pJddHsfZpQCF7jOmX6vXqMs0ojDeYmPKoSk=
+github.com/containers/common v0.22.0/go.mod h1:qsLcLHM7ha5Nc+JDp5duBwfwEfrnlfjXL/K8HO96QHw=
github.com/containers/conmon v2.0.20+incompatible h1:YbCVSFSCqFjjVwHTPINGdMX1F6JXHGTUje2ZYobNrkg=
github.com/containers/conmon v2.0.20+incompatible/go.mod h1:hgwZ2mtuDrppv78a/cOBNiCm6O0UMWGx1mu7P00nu5I=
github.com/containers/image/v5 v5.5.2-0.20200902171422-1c313b2d23e0 h1:MJ0bKRn2I5I2NJlVzMU7/eP/9yfMCeWaUskl6zgY/nc=
@@ -83,7 +83,6 @@ github.com/containers/ocicrypt v1.0.3 h1:vYgl+RZ9Q3DPMuTfxmN+qp0X2Bj52uuY2vnt6Gz
github.com/containers/ocicrypt v1.0.3/go.mod h1:CUBa+8MRNL/VkpxYIpaMtgn1WgXGyvPQj8jcy0EVG6g=
github.com/containers/psgo v1.5.1 h1:MQNb7FLbXqBdqz6u4lI2QWizVz4RSTzs1+Nk9XT1iVA=
github.com/containers/psgo v1.5.1/go.mod h1:2ubh0SsreMZjSXW1Hif58JrEcFudQyIy9EzPUWfawVU=
-github.com/containers/storage v1.23.0/go.mod h1:I1EIAA7B4OwWRSA0b4yq2AW1wjvvfcY0zLWQuwTa4zw=
github.com/containers/storage v1.23.3/go.mod h1:0azTMiuBhArp/VUmH1o4DJAGaaH+qLtEu17pJ/iKJCg=
github.com/containers/storage v1.23.5 h1:He9I6y1vRVXYoQg4v2Q9HFAcX4dI3V5MCCrjeBcjkCY=
github.com/containers/storage v1.23.5/go.mod h1:ha26Q6ngehFNhf3AWoXldvAvwI4jFe3ETQAf/CeZPyM=
@@ -255,7 +254,6 @@ github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7V
github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
-github.com/klauspost/compress v1.10.10/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
github.com/klauspost/compress v1.10.11 h1:K9z59aO18Aywg2b/WSgBaUX99mHy2BES18Cr5lBKZHk=
github.com/klauspost/compress v1.10.11/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
github.com/klauspost/compress v1.11.0 h1:wJbzvpYMVGG9iTI9VxpnNZfd4DzMPoCWze3GgSqz8yg=
@@ -399,8 +397,6 @@ github.com/rootless-containers/rootlesskit v0.10.0/go.mod h1:OZQfuRPb+2MA1p+hmjH
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/safchain/ethtool v0.0.0-20190326074333-42ed695e3de8 h1:2c1EFnZHIPCW8qKWgHMH/fX2PkSabFc5mrVzfUNdg5U=
github.com/safchain/ethtool v0.0.0-20190326074333-42ed695e3de8/go.mod h1:Z0q5wiBQGYcxhMZ6gUqHn6pYNLypFAvaL3UvgZLR0U4=
-github.com/seccomp/containers-golang v0.6.0 h1:VWPMMIDr8pAtNjCX0WvLEEK9EQi5lAm4HtJbDtAtFvQ=
-github.com/seccomp/containers-golang v0.6.0/go.mod h1:Dd9mONHvW4YdbSzdm23yf2CFw0iqvqLhO0mEFvPIvm4=
github.com/seccomp/libseccomp-golang v0.9.1/go.mod h1:GbW5+tmTXfcxTToHLXlScSlAvWlF4P2Ca7zGrPiEpWo=
github.com/seccomp/libseccomp-golang v0.9.2-0.20200616122406-847368b35ebf h1:b0+ZBD3rohnkQ4q5duD1+RyTXTg9yk+qTOPMSQtapO0=
github.com/seccomp/libseccomp-golang v0.9.2-0.20200616122406-847368b35ebf/go.mod h1:JA8cRccbGaA1s33RQf7Y1+q9gHmZX1yB/z9WDN1C6fg=
@@ -560,7 +556,6 @@ golang.org/x/sys v0.0.0-20200327173247-9dae0f8f5775/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20200501145240-bc7a7d42d5c3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200622214017-ed371f2e16b4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200720211630-cb9d2d5c5666/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200728102440-3e129f6d46b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200810151505-1b9f1253b3ed h1:WBkVNH1zd9jg/dK4HCM4lNANnmd12EHC9z+LmcCG4ns=
golang.org/x/sys v0.0.0-20200810151505-1b9f1253b3ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
diff --git a/hack/get_ci_vm.sh b/hack/get_ci_vm.sh
index b37dba508..adf3b1bf2 100755
--- a/hack/get_ci_vm.sh
+++ b/hack/get_ci_vm.sh
@@ -27,7 +27,7 @@ LIBPODROOT=$(realpath "$(dirname $0)/../")
# else: Assume $PWD is the root of the libpod repository
[[ "$LIBPODROOT" != "/" ]] || LIBPODROOT=$PWD
-# Command shortcuts save some typing (asumes $LIBPODROOT is subdir of $HOME)
+# Command shortcuts save some typing (assumes $LIBPODROOT is subdir of $HOME)
PGCLOUD="$GCLOUD_SUDO podman run -it --rm -e AS_ID=$UID -e AS_USER=$USER --security-opt label=disable -v $TMPDIR:$HOME -v $HOME/.config/gcloud:$HOME/.config/gcloud -v $HOME/.config/gcloud/ssh:$HOME/.ssh -v $LIBPODROOT:$LIBPODROOT $GCLOUD_IMAGE --configuration=libpod --project=$PROJECT"
SCP_CMD="$PGCLOUD compute scp"
diff --git a/libpod/container_exec.go b/libpod/container_exec.go
index 2a852ab81..f5f54c7cc 100644
--- a/libpod/container_exec.go
+++ b/libpod/container_exec.go
@@ -629,7 +629,7 @@ func (c *Container) ExecRemove(sessionID string, force bool) error {
logrus.Infof("Removing container %s exec session %s", c.ID(), session.ID())
- // Update status of exec session if running, so we cna check if it
+ // Update status of exec session if running, so we can check if it
// stopped in the meantime.
if session.State == define.ExecStateRunning {
running, err := c.ociRuntime.ExecUpdateStatus(c, session.ID())
diff --git a/libpod/container_internal.go b/libpod/container_internal.go
index 5a0a0edfa..0514fb46f 100644
--- a/libpod/container_internal.go
+++ b/libpod/container_internal.go
@@ -1269,7 +1269,7 @@ func (c *Container) stop(timeout uint) error {
c.state.StoppedByUser = true
if !conmonAlive {
- // Conmon is dead, so we can't epect an exit code.
+ // Conmon is dead, so we can't expect an exit code.
c.state.ExitCode = -1
c.state.FinishedTime = time.Now()
c.state.State = define.ContainerStateStopped
diff --git a/libpod/define/errors.go b/libpod/define/errors.go
index 7714ebbf0..b3f6483d1 100644
--- a/libpod/define/errors.go
+++ b/libpod/define/errors.go
@@ -163,6 +163,6 @@ var (
ErrNetworkOnPodContainer = errors.New("network cannot be configured when it is shared with a pod")
// ErrStoreNotInitialized indicates that the container storage was never
- // initilized.
- ErrStoreNotInitialized = errors.New("the container storage was never initilized")
+ // initialized.
+ ErrStoreNotInitialized = errors.New("the container storage was never initialized")
)
diff --git a/libpod/define/pod_inspect.go b/libpod/define/pod_inspect.go
index 634cbb728..60e19fe05 100644
--- a/libpod/define/pod_inspect.go
+++ b/libpod/define/pod_inspect.go
@@ -87,7 +87,7 @@ type InspectPodInfraConfig struct {
// HostAdd adds a number of hosts to the infra container's resolv.conf
// which will be shared with the rest of the pod.
HostAdd []string
- // Networks is a list of CNI networks te pod will join.
+ // Networks is a list of CNI networks the pod will join.
Networks []string
}
diff --git a/libpod/image/df.go b/libpod/image/df.go
new file mode 100644
index 000000000..84cf7af9e
--- /dev/null
+++ b/libpod/image/df.go
@@ -0,0 +1,126 @@
+package image
+
+import (
+ "context"
+ "time"
+
+ "github.com/containers/image/v5/docker/reference"
+)
+
+// DiskUsageStat gives disk-usage statistics for a specific image.
+type DiskUsageStat struct {
+ // ID of the image.
+ ID string
+ // Repository of the first recorded name of the image.
+ Repository string
+ // Tag of the first recorded name of the image.
+ Tag string
+ // Created is the creation time of the image.
+ Created time.Time
+ // SharedSize is the amount of space shared with another image.
+ SharedSize uint64
+ // UniqueSize is the amount of space used only by this image.
+ UniqueSize uint64
+ // Size is the total size of the image (i.e., the sum of the shared and
+ // unique size).
+ Size uint64
+ // Number of containers using the image.
+ Containers int
+}
+
+// DiskUsage returns disk-usage statistics for the specified slice of images.
+func (ir *Runtime) DiskUsage(ctx context.Context, images []*Image) ([]DiskUsageStat, error) {
+ stats := make([]DiskUsageStat, len(images))
+
+ // Build a layerTree to quickly compute (and cache!) parent/child
+ // relations.
+ tree, err := ir.layerTree()
+ if err != nil {
+ return nil, err
+ }
+
+ // Calculate the stats for each image.
+ for i, img := range images {
+ stat, err := diskUsageForImage(ctx, img, tree)
+ if err != nil {
+ return nil, err
+ }
+ stats[i] = *stat
+ }
+
+ return stats, nil
+}
+
+// diskUsageForImage returns the disk-usage statistics for the specified image.
+func diskUsageForImage(ctx context.Context, image *Image, tree *layerTree) (*DiskUsageStat, error) {
+ stat := DiskUsageStat{
+ ID: image.ID(),
+ Created: image.Created(),
+ }
+
+ // Repository and tag.
+ var name, repository, tag string
+ for _, n := range image.Names() {
+ if len(n) > 0 {
+ name = n
+ break
+ }
+ }
+ if len(name) > 0 {
+ named, err := reference.ParseNormalizedNamed(name)
+ if err != nil {
+ return nil, err
+ }
+ repository = named.Name()
+ if tagged, isTagged := named.(reference.NamedTagged); isTagged {
+ tag = tagged.Tag()
+ }
+ } else {
+ repository = "<none>"
+ tag = "<none>"
+ }
+ stat.Repository = repository
+ stat.Tag = tag
+
+ // Shared, unique and total size.
+ parent, err := tree.parent(ctx, image)
+ if err != nil {
+ return nil, err
+ }
+ childIDs, err := tree.children(ctx, image, false)
+ if err != nil {
+ return nil, err
+ }
+ // Optimistically set unique size to the full size of the image.
+ size, err := image.Size(ctx)
+ if err != nil {
+ return nil, err
+ }
+ stat.UniqueSize = *size
+
+ if len(childIDs) > 0 {
+ // If we have children, we share everything.
+ stat.SharedSize = stat.UniqueSize
+ stat.UniqueSize = 0
+ } else if parent != nil {
+ // If we have no children but a parent, remove the parent
+ // (shared) size from the unique one.
+ size, err := parent.Size(ctx)
+ if err != nil {
+ return nil, err
+ }
+ stat.UniqueSize -= *size
+ stat.SharedSize = *size
+ }
+
+ stat.Size = stat.SharedSize + stat.UniqueSize
+
+ // Number of containers using the image.
+ containers, err := image.Containers()
+ if err != nil {
+ return nil, err
+ }
+ stat.Containers = len(containers)
+
+ return &stat, nil
+}
diff --git a/libpod/info.go b/libpod/info.go
index 050d792bc..153000b6f 100644
--- a/libpod/info.go
+++ b/libpod/info.go
@@ -162,7 +162,7 @@ func (r *Runtime) hostInfo() (*define.HostInfo, error) {
return nil, errors.Wrapf(err, "error parsing system uptime")
}
- // TODO Isnt there a simple lib for this, something like humantime?
+ // TODO Isn't there a simple lib for this, something like humantime?
hoursFound := false
var timeBuffer bytes.Buffer
var hoursBuffer bytes.Buffer
diff --git a/libpod/oci_conmon_linux.go b/libpod/oci_conmon_linux.go
index bb138ca14..5769e5580 100644
--- a/libpod/oci_conmon_linux.go
+++ b/libpod/oci_conmon_linux.go
@@ -64,6 +64,7 @@ type ConmonOCIRuntime struct {
logSizeMax int64
noPivot bool
reservePorts bool
+ runtimeFlags []string
supportsJSON bool
supportsKVM bool
supportsNoCgroups bool
@@ -76,7 +77,7 @@ type ConmonOCIRuntime struct {
// The first path that points to a valid executable will be used.
// Deliberately private. Someone should not be able to construct this outside of
// libpod.
-func newConmonOCIRuntime(name string, paths []string, conmonPath string, runtimeCfg *config.Config) (OCIRuntime, error) {
+func newConmonOCIRuntime(name string, paths []string, conmonPath string, runtimeFlags []string, runtimeCfg *config.Config) (OCIRuntime, error) {
if name == "" {
return nil, errors.Wrapf(define.ErrInvalidArg, "the OCI runtime must be provided a non-empty name")
}
@@ -98,6 +99,7 @@ func newConmonOCIRuntime(name string, paths []string, conmonPath string, runtime
runtime := new(ConmonOCIRuntime)
runtime.name = name
runtime.conmonPath = conmonPath
+ runtime.runtimeFlags = runtimeFlags
runtime.conmonEnv = runtimeCfg.Engine.ConmonEnvVars
runtime.cgroupManager = runtimeCfg.Engine.CgroupManager
@@ -378,7 +380,7 @@ func (r *ConmonOCIRuntime) StartContainer(ctr *Container) error {
if path, ok := os.LookupEnv("PATH"); ok {
env = append(env, fmt.Sprintf("PATH=%s", path))
}
- if err := utils.ExecCmdWithStdStreams(os.Stdin, os.Stdout, os.Stderr, env, r.path, "start", ctr.ID()); err != nil {
+ if err := utils.ExecCmdWithStdStreams(os.Stdin, os.Stdout, os.Stderr, env, r.path, append(r.runtimeFlags, "start", ctr.ID())...); err != nil {
return err
}
@@ -398,10 +400,11 @@ func (r *ConmonOCIRuntime) KillContainer(ctr *Container, signal uint, all bool)
}
env := []string{fmt.Sprintf("XDG_RUNTIME_DIR=%s", runtimeDir)}
var args []string
+ args = append(args, r.runtimeFlags...)
if all {
- args = []string{"kill", "--all", ctr.ID(), fmt.Sprintf("%d", signal)}
+ args = append(args, "kill", "--all", ctr.ID(), fmt.Sprintf("%d", signal))
} else {
- args = []string{"kill", ctr.ID(), fmt.Sprintf("%d", signal)}
+ args = append(args, "kill", ctr.ID(), fmt.Sprintf("%d", signal))
}
if err := utils.ExecCmdWithStdStreams(os.Stdin, os.Stdout, os.Stderr, env, r.path, args...); err != nil {
return errors.Wrapf(err, "error sending signal to container %s", ctr.ID())
@@ -478,7 +481,7 @@ func (r *ConmonOCIRuntime) DeleteContainer(ctr *Container) error {
return err
}
env := []string{fmt.Sprintf("XDG_RUNTIME_DIR=%s", runtimeDir)}
- return utils.ExecCmdWithStdStreams(os.Stdin, os.Stdout, os.Stderr, env, r.path, "delete", "--force", ctr.ID())
+ return utils.ExecCmdWithStdStreams(os.Stdin, os.Stdout, os.Stderr, env, r.path, append(r.runtimeFlags, "delete", "--force", ctr.ID())...)
}
// PauseContainer pauses the given container.
@@ -488,7 +491,7 @@ func (r *ConmonOCIRuntime) PauseContainer(ctr *Container) error {
return err
}
env := []string{fmt.Sprintf("XDG_RUNTIME_DIR=%s", runtimeDir)}
- return utils.ExecCmdWithStdStreams(os.Stdin, os.Stdout, os.Stderr, env, r.path, "pause", ctr.ID())
+ return utils.ExecCmdWithStdStreams(os.Stdin, os.Stdout, os.Stderr, env, r.path, append(r.runtimeFlags, "pause", ctr.ID())...)
}
// UnpauseContainer unpauses the given container.
@@ -498,7 +501,7 @@ func (r *ConmonOCIRuntime) UnpauseContainer(ctr *Container) error {
return err
}
env := []string{fmt.Sprintf("XDG_RUNTIME_DIR=%s", runtimeDir)}
- return utils.ExecCmdWithStdStreams(os.Stdin, os.Stdout, os.Stderr, env, r.path, "resume", ctr.ID())
+ return utils.ExecCmdWithStdStreams(os.Stdin, os.Stdout, os.Stderr, env, r.path, append(r.runtimeFlags, "resume", ctr.ID())...)
}
// HTTPAttach performs an attach for the HTTP API.
@@ -765,6 +768,7 @@ func (r *ConmonOCIRuntime) CheckpointContainer(ctr *Container, options Container
logrus.Debugf("Writing checkpoint to %s", imagePath)
logrus.Debugf("Writing checkpoint logs to %s", workPath)
args := []string{}
+ args = append(args, r.runtimeFlags...)
args = append(args, "checkpoint")
args = append(args, "--image-path")
args = append(args, imagePath)
@@ -1310,6 +1314,13 @@ func (r *ConmonOCIRuntime) sharedConmonArgs(ctr *Container, cuuid, bundlePath, p
"--exit-dir", exitDir,
"--socket-dir-path", r.socketsDir,
}
+ if len(r.runtimeFlags) > 0 {
+ rFlags := []string{}
+ for _, arg := range r.runtimeFlags {
+ rFlags = append(rFlags, "--runtime-arg", arg)
+ }
+ args = append(args, rFlags...)
+ }
if r.cgroupManager == config.SystemdCgroupsManager && !ctr.config.NoCgroups && ctr.config.CgroupsMode != cgroupSplit {
args = append(args, "-s")
diff --git a/libpod/oci_conmon_unsupported.go b/libpod/oci_conmon_unsupported.go
index 28d6ef12f..2504c31f0 100644
--- a/libpod/oci_conmon_unsupported.go
+++ b/libpod/oci_conmon_unsupported.go
@@ -17,7 +17,7 @@ type ConmonOCIRuntime struct {
}
// newConmonOCIRuntime is not supported on this OS.
-func newConmonOCIRuntime(name string, paths []string, conmonPath string, runtimeCfg *config.Config) (OCIRuntime, error) {
+func newConmonOCIRuntime(name string, paths []string, conmonPath string, runtimeFlags []string, runtimeCfg *config.Config) (OCIRuntime, error) {
return nil, define.ErrNotImplemented
}
diff --git a/libpod/options.go b/libpod/options.go
index 7eec530ea..d592124bc 100644
--- a/libpod/options.go
+++ b/libpod/options.go
@@ -530,6 +530,17 @@ func WithEnableSDNotify() RuntimeOption {
}
}
+// WithRuntimeFlags adds the global runtime flags to the container config
+func WithRuntimeFlags(runtimeFlags []string) RuntimeOption {
+ return func(rt *Runtime) error {
+ if rt.valid {
+ return define.ErrRuntimeFinalized
+ }
+ rt.runtimeFlags = runtimeFlags
+ return nil
+ }
+}
+
// Container Creation Options
// WithShmDir sets the directory that should be mounted on /dev/shm.
@@ -608,7 +619,7 @@ func WithSecLabels(labelOpts []string) CtrCreateOption {
}
}
-// WithUser sets the user identity field in configutation.
+// WithUser sets the user identity field in configuration.
// Valid uses [user | user:group | uid | uid:gid | user:gid | uid:group ].
func WithUser(user string) CtrCreateOption {
return func(ctr *Container) error {
diff --git a/libpod/runtime.go b/libpod/runtime.go
index 1d2e624d8..fdd9ebcc8 100644
--- a/libpod/runtime.go
+++ b/libpod/runtime.go
@@ -53,6 +53,7 @@ type Runtime struct {
imageContext *types.SystemContext
defaultOCIRuntime OCIRuntime
ociRuntimes map[string]OCIRuntime
+ runtimeFlags []string
netPlugin ocicni.CNIPlugin
conmonPath string
imageRuntime *image.Runtime
@@ -365,7 +366,7 @@ func makeRuntime(ctx context.Context, runtime *Runtime) (retErr error) {
// Initialize remaining OCI runtimes
for name, paths := range runtime.config.Engine.OCIRuntimes {
- ociRuntime, err := newConmonOCIRuntime(name, paths, runtime.conmonPath, runtime.config)
+ ociRuntime, err := newConmonOCIRuntime(name, paths, runtime.conmonPath, runtime.runtimeFlags, runtime.config)
if err != nil {
// Don't fatally error.
// This will allow us to ship configs including optional
@@ -385,7 +386,7 @@ func makeRuntime(ctx context.Context, runtime *Runtime) (retErr error) {
if strings.HasPrefix(runtime.config.Engine.OCIRuntime, "/") {
name := filepath.Base(runtime.config.Engine.OCIRuntime)
- ociRuntime, err := newConmonOCIRuntime(name, []string{runtime.config.Engine.OCIRuntime}, runtime.conmonPath, runtime.config)
+ ociRuntime, err := newConmonOCIRuntime(name, []string{runtime.config.Engine.OCIRuntime}, runtime.conmonPath, runtime.runtimeFlags, runtime.config)
if err != nil {
return err
}
diff --git a/pkg/api/handlers/compat/events.go b/pkg/api/handlers/compat/events.go
index 61f895c29..289bf4a2d 100644
--- a/pkg/api/handlers/compat/events.go
+++ b/pkg/api/handlers/compat/events.go
@@ -17,7 +17,7 @@ import (
)
// filtersFromRequests extracts the "filters" parameter from the specified
-// http.Request. The paramater can either be a `map[string][]string` as done
+// http.Request. The parameter can either be a `map[string][]string` as done
// in new versions of Docker and libpod, or a `map[string]map[string]bool` as
// done in older versions of Docker. We have to do a bit of Yoga to support
// both - just as Docker does as well.
diff --git a/pkg/api/server/register_play.go b/pkg/api/server/register_play.go
index a96f61099..9b27f36e4 100644
--- a/pkg/api/server/register_play.go
+++ b/pkg/api/server/register_play.go
@@ -24,7 +24,7 @@ func (s *APIServer) registerPlayHandlers(r *mux.Router) error {
// name: tlsVerify
// type: boolean
// default: true
- // description: Require HTTPS and verify signatures when contating registries.
+ // description: Require HTTPS and verify signatures when contacting registries.
// - in: body
// name: request
// description: Kubernetes YAML file.
diff --git a/pkg/auth/auth.go b/pkg/auth/auth.go
index ffa65f7e5..69a7da869 100644
--- a/pkg/auth/auth.go
+++ b/pkg/auth/auth.go
@@ -126,7 +126,7 @@ func encodeMultiAuthConfigs(authConfigs map[string]types.DockerAuthConfig) (stri
// one or more container registries. If tmpDir is empty, the system's default
// TMPDIR will be used.
func authConfigsToAuthFile(authConfigs map[string]types.DockerAuthConfig) (string, error) {
- // Intitialize an empty temporary JSON file.
+ // Initialize an empty temporary JSON file.
tmpFile, err := ioutil.TempFile("", "auth.json.")
if err != nil {
return "", err
diff --git a/pkg/bindings/test/system_test.go b/pkg/bindings/test/system_test.go
index 2b2fa9b7c..82e5c7541 100644
--- a/pkg/bindings/test/system_test.go
+++ b/pkg/bindings/test/system_test.go
@@ -119,7 +119,7 @@ var _ = Describe("Podman system", func() {
// Alpine image should not be pruned as used by running container
Expect(systemPruneResponse.ImagePruneReport.Report.Id).
ToNot(ContainElement("docker.io/library/alpine:latest"))
- // Though unsed volume is available it should not be pruned as flag set to false.
+ // Though unused volume is available it should not be pruned as flag set to false.
Expect(len(systemPruneResponse.VolumePruneReport)).To(Equal(0))
})
diff --git a/pkg/domain/entities/engine.go b/pkg/domain/entities/engine.go
index 6776d09e9..f23d964e5 100644
--- a/pkg/domain/entities/engine.go
+++ b/pkg/domain/entities/engine.go
@@ -46,6 +46,7 @@ type PodmanConfig struct {
RegistriesConf string // allows for specifying a custom registries.conf
Remote bool // Connection to Podman API Service will use RESTful API
RuntimePath string // --runtime flag will set Engine.RuntimePath
+ RuntimeFlags []string // global flags for the container runtime
Span opentracing.Span // tracing object
SpanCloser io.Closer // Close() for tracing object
SpanCtx context.Context // context to use when tracing
diff --git a/pkg/domain/entities/play.go b/pkg/domain/entities/play.go
index 0823bc64e..2ba369b83 100644
--- a/pkg/domain/entities/play.go
+++ b/pkg/domain/entities/play.go
@@ -32,7 +32,7 @@ type PlayKubePod struct {
ID string
// Containers - the IDs of the containers running in the created pod.
Containers []string
- // Logs - non-fatal erros and log messages while processing.
+ // Logs - non-fatal errors and log messages while processing.
Logs []string
}
diff --git a/pkg/domain/infra/abi/manifest.go b/pkg/domain/infra/abi/manifest.go
index 55f73bf65..672d0a69f 100644
--- a/pkg/domain/infra/abi/manifest.go
+++ b/pkg/domain/infra/abi/manifest.go
@@ -130,7 +130,7 @@ func (ir *ImageEngine) ManifestAdd(ctx context.Context, opts entities.ManifestAd
func (ir *ImageEngine) ManifestAnnotate(ctx context.Context, names []string, opts entities.ManifestAnnotateOptions) (string, error) {
listImage, err := ir.Libpod.ImageRuntime().NewFromLocal(names[0])
if err != nil {
- return "", errors.Wrapf(err, "error retreiving local image from image name %s", names[0])
+ return "", errors.Wrapf(err, "error retrieving local image from image name %s", names[0])
}
digest, err := digest.Parse(names[1])
if err != nil {
diff --git a/pkg/domain/infra/abi/system.go b/pkg/domain/infra/abi/system.go
index 914a7681d..57c098166 100644
--- a/pkg/domain/infra/abi/system.go
+++ b/pkg/domain/infra/abi/system.go
@@ -17,7 +17,6 @@ import (
"github.com/containers/podman/v2/pkg/rootless"
"github.com/containers/podman/v2/pkg/util"
"github.com/containers/podman/v2/utils"
- "github.com/docker/distribution/reference"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/spf13/cobra"
@@ -199,71 +198,32 @@ func (ic *ContainerEngine) SystemDf(ctx context.Context, options entities.System
dfImages = []*entities.SystemDfImageReport{}
)
- // Get Images and iterate them
+ // Compute disk-usage stats for all local images.
imgs, err := ic.Libpod.ImageRuntime().GetImages()
if err != nil {
return nil, err
}
- for _, i := range imgs {
- var sharedSize uint64
- cons, err := i.Containers()
- if err != nil {
- return nil, err
- }
- imageSize, err := i.Size(ctx)
- if err != nil {
- return nil, err
- }
- uniqueSize := *imageSize
- parent, err := i.GetParent(ctx)
- if err != nil {
- return nil, err
- }
- if parent != nil {
- parentSize, err := parent.Size(ctx)
- if err != nil {
- return nil, err
- }
- uniqueSize = *parentSize - *imageSize
- sharedSize = *imageSize - uniqueSize
- }
- var name, repository, tag string
- for _, n := range i.Names() {
- if len(n) > 0 {
- name = n
- break
- }
- }
-
- if len(name) > 0 {
- named, err := reference.ParseNormalizedNamed(name)
- if err != nil {
- return nil, err
- }
- repository = named.Name()
- if tagged, isTagged := named.(reference.NamedTagged); isTagged {
- tag = tagged.Tag()
- }
- } else {
- repository = "<none>"
- tag = "<none>"
- }
+ imageStats, err := ic.Libpod.ImageRuntime().DiskUsage(ctx, imgs)
+ if err != nil {
+ return nil, err
+ }
+ for _, stat := range imageStats {
report := entities.SystemDfImageReport{
- Repository: repository,
- Tag: tag,
- ImageID: i.ID(),
- Created: i.Created(),
- Size: int64(*imageSize),
- SharedSize: int64(sharedSize),
- UniqueSize: int64(uniqueSize),
- Containers: len(cons),
+ Repository: stat.Repository,
+ Tag: stat.Tag,
+ ImageID: stat.ID,
+ Created: stat.Created,
+ Size: int64(stat.Size),
+ SharedSize: int64(stat.SharedSize),
+ UniqueSize: int64(stat.UniqueSize),
+ Containers: stat.Containers,
}
dfImages = append(dfImages, &report)
}
- // GetContainers and iterate them
+ // Get Containers and iterate them
cons, err := ic.Libpod.GetAllContainers()
if err != nil {
return nil, err
diff --git a/pkg/domain/infra/runtime_libpod.go b/pkg/domain/infra/runtime_libpod.go
index a88347e24..f9b8106ef 100644
--- a/pkg/domain/infra/runtime_libpod.go
+++ b/pkg/domain/infra/runtime_libpod.go
@@ -156,6 +156,14 @@ func getRuntime(ctx context.Context, fs *flag.FlagSet, opts *engineOpts) (*libpo
options = append(options, libpod.WithRenumber())
}
+ if len(cfg.RuntimeFlags) > 0 {
+ runtimeFlags := []string{}
+ for _, arg := range cfg.RuntimeFlags {
+ runtimeFlags = append(runtimeFlags, "--"+arg)
+ }
+ options = append(options, libpod.WithRuntimeFlags(runtimeFlags))
+ }
+
// Only set this if the user changes storage config on the command line
if storageSet {
options = append(options, libpod.WithStorageConfig(storageOpts))
diff --git a/pkg/network/netconflist_test.go b/pkg/network/netconflist_test.go
index a82a0140a..5893bf985 100644
--- a/pkg/network/netconflist_test.go
+++ b/pkg/network/netconflist_test.go
@@ -28,7 +28,7 @@ func TestNewIPAMDefaultRoute(t *testing.T) {
t.Run(tt.name, func(t *testing.T) {
got, err := NewIPAMDefaultRoute(tt.isIPv6)
if err != nil {
- t.Errorf("no errorr expected: %v", err)
+ t.Errorf("no error expected: %v", err)
}
if !reflect.DeepEqual(got, tt.want) {
t.Errorf("NewIPAMDefaultRoute() = %v, want %v", got, tt.want)
diff --git a/pkg/network/network.go b/pkg/network/network.go
index db625da56..c4c1ff67f 100644
--- a/pkg/network/network.go
+++ b/pkg/network/network.go
@@ -206,7 +206,7 @@ func InspectNetwork(config *config.Config, name string) (map[string]interface{},
}
// Exists says whether a given network exists or not; it meant
-// specifically for restful reponses so 404s can be used
+// specifically for restful responses so 404s can be used
func Exists(config *config.Config, name string) (bool, error) {
_, err := ReadRawCNIConfByName(config, name)
if err != nil {
diff --git a/pkg/spec/spec.go b/pkg/spec/spec.go
index 5e97620cc..42228540c 100644
--- a/pkg/spec/spec.go
+++ b/pkg/spec/spec.go
@@ -334,7 +334,7 @@ func (config *CreateConfig) createConfigToOCISpec(runtime *libpod.Runtime, userM
} else {
defaultEnv, err = env.ParseSlice(runtimeConfig.Containers.Env)
if err != nil {
- return nil, errors.Wrap(err, "Env fields in containers.conf failed ot parse")
+ return nil, errors.Wrap(err, "Env fields in containers.conf failed to parse")
}
defaultEnv = env.Join(env.DefaultEnvVariables(), defaultEnv)
}
diff --git a/pkg/specgen/generate/storage.go b/pkg/specgen/generate/storage.go
index 182ae74a7..7f55317ff 100644
--- a/pkg/specgen/generate/storage.go
+++ b/pkg/specgen/generate/storage.go
@@ -46,7 +46,7 @@ func finalizeMounts(ctx context.Context, s *specgen.SpecGenerator, rt *libpod.Ru
return nil, nil, err
}
- // Supercede from --volumes-from.
+ // Supersede from --volumes-from.
for dest, mount := range volFromMounts {
baseMounts[dest] = mount
}
diff --git a/pkg/util/utils.go b/pkg/util/utils.go
index 82282a549..7612d3012 100644
--- a/pkg/util/utils.go
+++ b/pkg/util/utils.go
@@ -537,33 +537,21 @@ func OpenExclusiveFile(path string) (*os.File, error) {
return os.OpenFile(path, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0666)
}
-// PullType whether to pull new image
-type PullType int
+type PullType = config.PullPolicy
-const (
+var (
// PullImageAlways always try to pull new image when create or run
- PullImageAlways PullType = iota
+ PullImageAlways = config.PullImageAlways
// PullImageMissing pulls image if it is not locally
- PullImageMissing
+ PullImageMissing = config.PullImageMissing
// PullImageNever will never pull new image
- PullImageNever
+ PullImageNever = config.PullImageNever
)
// ValidatePullType check if the pullType from CLI is valid and returns the valid enum type
// if the value from CLI is invalid returns the error
func ValidatePullType(pullType string) (PullType, error) {
- switch pullType {
- case "always":
- return PullImageAlways, nil
- case "missing", "IfNotPresent":
- return PullImageMissing, nil
- case "never":
- return PullImageNever, nil
- case "":
- return PullImageMissing, nil
- default:
- return PullImageMissing, errors.Errorf("invalid pull type %q", pullType)
- }
+ return config.ValidatePullPolicy(pullType)
}
// ExitCode reads the error message when failing to executing container process
diff --git a/pkg/util/utils_linux_test.go b/pkg/util/utils_linux_test.go
index 38e6dbef9..aa193bbef 100644
--- a/pkg/util/utils_linux_test.go
+++ b/pkg/util/utils_linux_test.go
@@ -8,7 +8,7 @@ import (
)
func TestGetImageConfigStopSignal(t *testing.T) {
- // Linux-only beause parsing signal names is not supported on non-Linux systems by
+ // Linux-only because parsing signal names is not supported on non-Linux systems by
// pkg/signal.
stopSignalValidInt, err := GetImageConfig([]string{"STOPSIGNAL 9"})
require.Nil(t, err)
diff --git a/pkg/varlink/io.podman.varlink b/pkg/varlink/io.podman.varlink
index 6240936d0..cd6316011 100644
--- a/pkg/varlink/io.podman.varlink
+++ b/pkg/varlink/io.podman.varlink
@@ -257,7 +257,7 @@ type InfoRegistry (
blocked: []string
)
-# InfoStore describes the host's storage informatoin
+# InfoStore describes the host's storage information
type InfoStore (
containers: int,
images: int,
diff --git a/pkg/varlinkapi/container.go b/pkg/varlinkapi/container.go
index bf3ed0022..c4e8c1feb 100644
--- a/pkg/varlinkapi/container.go
+++ b/pkg/varlinkapi/container.go
@@ -750,7 +750,7 @@ func portsToString(ports []ocicni.PortMapping) string {
continue
}
}
- // For each portMapKey, format group list and appned to output string.
+ // For each portMapKey, format group list and append to output string.
for _, portKey := range groupKeyList {
group := portGroupMap[portKey]
portDisplay = append(portDisplay, formatGroup(portKey, group.first, group.last))
@@ -794,7 +794,7 @@ func GetRunlabel(label string, runlabelImage string, ctx context.Context, runtim
return runLabel, imageName, err
}
-// GenerateRunlabelCommand generates the command that will eventually be execucted by Podman.
+// GenerateRunlabelCommand generates the command that will eventually be executed by Podman.
func GenerateRunlabelCommand(runLabel, imageName, name string, opts map[string]string, extraArgs []string, globalOpts string) ([]string, []string, error) {
// If no name is provided, we use the image's basename instead.
if name == "" {
diff --git a/test/e2e/containers_conf_test.go b/test/e2e/containers_conf_test.go
index 1054f55f6..8339b7732 100644
--- a/test/e2e/containers_conf_test.go
+++ b/test/e2e/containers_conf_test.go
@@ -93,7 +93,7 @@ var _ = Describe("Podman run", func() {
Expect(session.OutputToString()).ToNot(Equal(cap.OutputToString()))
})
- It("podman Regular capabilties", func() {
+ It("podman Regular capabilities", func() {
SkipIfRootless()
os.Setenv("CONTAINERS_CONF", "config/containers.conf")
setup := podmanTest.RunTopContainer("test1")
@@ -105,7 +105,7 @@ var _ = Describe("Podman run", func() {
Expect(result.OutputToString()).To(ContainSubstring("NET_RAW"))
})
- It("podman drop capabilties", func() {
+ It("podman drop capabilities", func() {
os.Setenv("CONTAINERS_CONF", "config/containers-caps.conf")
setup := podmanTest.RunTopContainer("test1")
setup.WaitWithDefaultTimeout()
diff --git a/test/e2e/prune_test.go b/test/e2e/prune_test.go
index 1d592a42d..9c9d85194 100644
--- a/test/e2e/prune_test.go
+++ b/test/e2e/prune_test.go
@@ -261,7 +261,7 @@ var _ = Describe("Podman prune", func() {
// Two as pods infra container and one newly created.
Expect(podmanTest.NumberOfContainers()).To(Equal(3))
- // image list current count should not be pruned if all flag isnt enabled
+ // image list current count should not be pruned if all flag isn't enabled
session = podmanTest.Podman([]string{"images"})
session.WaitWithDefaultTimeout()
numberOfImages := len(session.OutputToStringArray())
diff --git a/test/e2e/ps_test.go b/test/e2e/ps_test.go
index a2338c924..66233412c 100644
--- a/test/e2e/ps_test.go
+++ b/test/e2e/ps_test.go
@@ -484,7 +484,7 @@ var _ = Describe("Podman ps", func() {
Expect(ps.OutputToString()).To(ContainSubstring("0.0.0.0:8080->80/tcp"))
})
- It("podman ps truncate long create commad", func() {
+ It("podman ps truncate long create command", func() {
session := podmanTest.Podman([]string{"run", ALPINE, "echo", "very", "long", "create", "command"})
session.WaitWithDefaultTimeout()
Expect(session.ExitCode()).To(Equal(0))
diff --git a/test/python/dockerpy/tests/test_images.py b/test/python/dockerpy/tests/test_images.py
index 5eae61c2f..602a86de2 100644
--- a/test/python/dockerpy/tests/test_images.py
+++ b/test/python/dockerpy/tests/test_images.py
@@ -59,7 +59,7 @@ class TestImages(unittest.TestCase):
self.assertFalse
# Validates if name updates when the image is retagged.
- @unittest.skip("dosent work now")
+ @unittest.skip("doesn't work now")
def test_retag_valid_image(self):
client.tag(constant.ALPINE_SHORTNAME, "demo", "rename")
alpine_image = client.inspect_image(constant.ALPINE)
diff --git a/test/system/030-run.bats b/test/system/030-run.bats
index 4e518c571..518d902a7 100644
--- a/test/system/030-run.bats
+++ b/test/system/030-run.bats
@@ -61,6 +61,12 @@ echo $rand | 0 | $rand
is "$tests_run" "$(grep . <<<$tests | wc -l)" "Ran the full set of tests"
}
+@test "podman run - global runtime option" {
+ skip_if_remote "runtime flag is not passed over remote"
+ run_podman 126 --runtime-flag invalidflag run --rm $IMAGE
+ is "$output" ".*invalidflag" "failed when passing undefined flags to the runtime"
+}
+
# 'run --preserve-fds' passes a number of additional file descriptors into the container
@test "podman run --preserve-fds" {
skip_if_remote "preserve-fds is meaningless over remote"
diff --git a/test/system/070-build.bats b/test/system/070-build.bats
index 997699ecb..66f6610ea 100644
--- a/test/system/070-build.bats
+++ b/test/system/070-build.bats
@@ -30,9 +30,26 @@ EOF
run_podman rmi -f build_test
}
+@test "podman build - global runtime flags test" {
+ skip_if_remote "FIXME: pending #7136"
+
+ rand_content=$(random_string 50)
+
+ tmpdir=$PODMAN_TMPDIR/build-test
+ run mkdir -p $tmpdir
+ containerfile=$tmpdir/Containerfile
+ cat >$containerfile <<EOF
+FROM $IMAGE
+RUN echo $rand_content
+EOF
+
+ run_podman 125 --runtime-flag invalidflag build -t build_test $tmpdir
+ is "$output" ".*invalidflag" "failed when passing undefined flags to the runtime"
+}
+
# Regression from v1.5.0. This test passes fine in v1.5.0, fails in 1.6
@test "podman build - cache (#3920)" {
- skip_if_remote "FIXME: pending #7136"
+ skip_if_remote "FIXME: pending #7136, runtime flag is not passed over remote"
if is_remote && is_rootless; then
skip "unreliable with podman-remote and rootless; #2972"
fi
@@ -128,7 +145,7 @@ echo "\$1"
printenv | grep MYENV | sort | sed -e 's/^MYENV.=//'
EOF
- # For overridding with --env-file
+ # For overriding with --env-file
cat >$PODMAN_TMPDIR/env-file <<EOF
MYENV3=$s_env3
http_proxy=http-proxy-in-env-file
diff --git a/test/utils/common_function_test.go b/test/utils/common_function_test.go
index 26bb8b473..0bbc31d5b 100644
--- a/test/utils/common_function_test.go
+++ b/test/utils/common_function_test.go
@@ -75,10 +75,10 @@ var _ = Describe("Common functions test", func() {
Expect(newer).To(Equal(expect), "Version compare results is not as expect.")
Expect(err == nil).To(Equal(isNil), "Error is not as expect.")
},
- Entry("Invlid kernel version: 0", "0", false, false),
+ Entry("Invalid kernel version: 0", "0", false, false),
Entry("Older kernel version:0.0", "0.0", true, true),
Entry("Newer kernel version: 100.17.14", "100.17.14", false, true),
- Entry("Invlid kernel version: I am not a kernel version", "I am not a kernel version", false, false),
+ Entry("Invalid kernel version: I am not a kernel version", "I am not a kernel version", false, false),
)
DescribeTable("Test TestIsCommandAvailable",
diff --git a/troubleshooting.md b/troubleshooting.md
index 9677b1821..4b0f2e1e4 100644
--- a/troubleshooting.md
+++ b/troubleshooting.md
@@ -42,7 +42,7 @@ $ podman run -v ~/mycontent:/content:Z fedora touch /content/file
Make sure the content is private for the container. Do not relabel system directories and content.
Relabeling system content might cause other confined services on your machine to fail. For these
-types of containers we recommmend that disable SELinux separation. The option `--security-opt label=disable`
+types of containers we recommend disabling SELinux separation. The option `--security-opt label=disable`
will disable SELinux separation for the container.
$ podman run --security-opt label=disable -v ~:/home/user fedora touch /home/user/file
@@ -533,7 +533,7 @@ With the default detach key combo ctrl-p,ctrl-q, shell history navigation
display this previous command. Or anything else. Conmon is waiting for an
additional character to see if the user wants to detach from the container.
Adding additional characters to the command will cause it to be displayed along
-with the additonal character. If the user types ctrl-p a second time the shell
+with the additional character. If the user types ctrl-p a second time the shell
display the 2nd to last command.
#### Solution
@@ -546,7 +546,7 @@ podman run -ti --detach-keys ctrl-q,ctrl-q fedora sh
```
To make this change the default for all containers, users can modify the
-containers.conf file. This can be done simply in your homedir, but adding the
+containers.conf file. This can be done simply in your home directory, by adding the
following lines to users containers.conf
```
@@ -617,3 +617,30 @@ If you encounter a `fuse: device not found` error when running the container ima
the fuse kernel module has not been loaded on your host system. Use the command `modprobe fuse` to load the
module and then run the container image afterwards. To enable this automatically at boot time, you can add a configuration
file to `/etc/modules.load.d`. See `man modules-load.d` for more details.
+
+### 25) podman run --rootfs link/to/read/only/dir does not work
+
+An error such as "OCI runtime error" on a read-only filesystem or the error "{image} is not an absolute path or is a symlink" are often indicators for this issue. For more details, review this [issue](
+https://github.com/containers/podman/issues/5895).
+
+#### Symptom
+
+Rootless Podman requires certain files to exist in a file system in order to run.
+Podman will create /etc/resolv.conf, /etc/hosts and other file descriptors on the rootfs in order
+to mount volumes on them.
+
+#### Solution
+
+Run the container once in read/write mode, Podman will generate all of the FDs on the rootfs, and
+from that point forward you can run with a read-only rootfs.
+
+$ podman run --rm --rootfs /path/to/rootfs true
+
+The command above will create all the missing directories needed to run the container.
+
+After that, it can be used in read only mode, by multiple containers at the same time:
+
+$ podman run --read-only --rootfs /path/to/rootfs ....
+
+Another option would be to create an overlay file system on the directory as a lower and then
+allow podman to create the files on the upper.
diff --git a/vendor/github.com/containers/buildah/.cirrus.yml b/vendor/github.com/containers/buildah/.cirrus.yml
index 8fd652ce1..b105f589e 100644
--- a/vendor/github.com/containers/buildah/.cirrus.yml
+++ b/vendor/github.com/containers/buildah/.cirrus.yml
@@ -6,7 +6,7 @@ env:
#### Global variables used for all tasks
####
# Name of the ultimate destination branch for this CI run, PR or post-merge.
- DEST_BRANCH: "master"
+ DEST_BRANCH: "release-1.16"
GOPATH: "/var/tmp/go"
GOSRC: "${GOPATH}/src/github.com/containers/buildah"
# Overrides default location (/tmp/cirrus) for repo clone
@@ -295,11 +295,11 @@ gce_instance:
build_script: |
set -ex
- mkdir -p /nix
mkdir -p .cache
- mount --bind .cache /nix
+ mv .cache /nix
if [[ -z $(ls -A /nix) ]]; then podman run --rm --privileged -ti -v /:/mnt nixos/nix cp -rfT /nix /mnt/nix; fi
podman run --rm --privileged -ti -v /nix:/nix -v ${PWD}:${PWD} -w ${PWD} nixos/nix nix --print-build-logs --option cores 8 --option max-jobs 8 build --file nix/
+ mv /nix .cache
chown -Rf $(whoami) .cache
binaries_artifacts:
diff --git a/vendor/github.com/containers/buildah/CHANGELOG.md b/vendor/github.com/containers/buildah/CHANGELOG.md
index a3f5f2f11..ecbd0540e 100644
--- a/vendor/github.com/containers/buildah/CHANGELOG.md
+++ b/vendor/github.com/containers/buildah/CHANGELOG.md
@@ -2,6 +2,118 @@
# Changelog
+## v1.16.1 (2020-09-10)
+ copier.Get(): hard link targets shouldn't be relative paths
+
+## v1.16.0 (2020-09-03)
+ fix build on 32bit arches
+ containerImageRef.NewImageSource(): don't always force timestamps
+ Add fuse module warning to image readme
+ Heed our retry delay option values when retrying commit/pull/push
+ Switch to containers/common for seccomp
+ Use --timestamp rather then --omit-timestamp
+ docs: remove outdated notice
+ docs: remove outdated notice
+ build-using-dockerfile: add a hidden --log-rusage flag
+ build(deps): bump github.com/containers/image/v5 from 5.5.1 to 5.5.2
+ Discard ReportWriter if user sets options.Quiet
+ build(deps): bump github.com/containers/common from 0.19.0 to 0.20.3
+ Fix ownership of content copied using COPY --from
+ newTarDigester: zero out timestamps in tar headers
+ Update nix pin with `make nixpkgs`
+ bud.bats: correct .dockerignore integration tests
+ Use pipes for copying
+ run: include stdout in error message
+ run: use the correct error for errors.Wrapf
+ copier: un-export internal types
+ copier: add Mkdir()
+ in_podman: don't get tripped up by $CIRRUS_CHANGE_TITLE
+ docs/buildah-commit.md: tweak some wording, add a --rm example
+ imagebuildah: don’t blank out destination names when COPYing
+ Replace retry functions with common/pkg/retry
+ StageExecutor.historyMatches: compare timestamps using .Equal
+ Update vendor of containers/common
+ Fix errors found in coverity scan
+ Change namespace handling flags to better match podman commands
+ conformance testing: ignore buildah.BuilderIdentityAnnotation labels
+ Vendor in containers/storage v1.23.0
+ Add buildah.IsContainer interface
+ Avoid feeding run_buildah to pipe
+ fix(buildahimage): add xz dependency in buildah image
+ Bump github.com/containers/common from 0.15.2 to 0.18.0
+ Howto for rootless image building from OpenShift
+ Add --omit-timestamp flag to buildah bud
+ Update nix pin with `make nixpkgs`
+ Shutdown storage on failures
+ Handle COPY --from when an argument is used
+ Bump github.com/seccomp/containers-golang from 0.5.0 to 0.6.0
+ Cirrus: Use newly built VM images
+ Bump github.com/opencontainers/runc from 1.0.0-rc91 to 1.0.0-rc92
+ Enhance the .dockerignore man pages
+ conformance: add a test for COPY from subdirectory
+ fix bug manifest inspct
+ Add documentation for .dockerignore
+ Add BuilderIdentityAnnotation to identify buildah version
+ DOC: Add quay.io/containers/buildah image to README.md
+ Update buildahimages readme
+ fix spelling mistake in "info" command result display
+ Don't bind /etc/host and /etc/resolv.conf if network is not present
+ blobcache: avoid an unnecessary NewImage()
+ Build static binary with `buildGoModule`
+ copier: split StripSetidBits into StripSetuidBit/StripSetgidBit/StripStickyBit
+ tarFilterer: handle multiple archives
+ Fix a race we hit during conformance tests
+ Rework conformance testing
+ Update 02-registries-repositories.md
+ test-unit: invoke cmd/buildah tests with --flags
+ parse: fix a type mismatch in a test
+ Fix compilation of tests/testreport/testreport
+ build.sh: log the version of Go that we're using
+ test-unit: increase the test timeout to 40/45 minutes
+ Add the "copier" package
+ Fix & add notes regarding problematic language in codebase
+ Add dependency on github.com/stretchr/testify/require
+ CompositeDigester: add the ability to filter tar streams
+ BATS tests: make more robust
+ vendor golang.org/x/text@v0.3.3
+ Switch golang 1.12 to golang 1.13
+ imagebuildah: wait for stages that might not have even started yet
+ chroot, run: not fail on bind mounts from /sys
+ chroot: do not use setgroups if it is blocked
+ Set engine env from containers.conf
+ imagebuildah: return the right stage's image as the "final" image
+ Fix a help string
+ Deduplicate environment variables
+ switch containers/libpod to containers/podman
+ Bump github.com/containers/ocicrypt from 1.0.2 to 1.0.3
+ Bump github.com/opencontainers/selinux from 1.5.2 to 1.6.0
+ Mask out /sys/dev to prevent information leak
+ linux: skip errors from the runtime kill
+ Mask over the /sys/fs/selinux in mask branch
+ Add VFS additional image store to container
+ tests: add auth tests
+ Allow "readonly" as alias to "ro" in mount options
+ Ignore OS X specific consistency mount option
+ Bump github.com/onsi/ginkgo from 1.13.0 to 1.14.0
+ Bump github.com/containers/common from 0.14.0 to 0.15.2
+ Rootless Buildah should default to IsolationOCIRootless
+ imagebuildah: fix inheriting multi-stage builds
+ Make imagebuildah.BuildOptions.Architecture/OS optional
+ Make imagebuildah.BuildOptions.Jobs optional
+ Resolve a possible race in imagebuildah.Executor.startStage()
+ Switch scripts to use containers.conf
+ Bump openshift/imagebuilder to v1.1.6
+ Bump go.etcd.io/bbolt from 1.3.4 to 1.3.5
+ buildah, bud: support --jobs=N for parallel execution
+ executor: refactor build code inside new function
+ Add bud regression tests
+ Cirrus: Fix missing htpasswd in registry img
+ docs: clarify the 'triples' format
+ CHANGELOG.md: Fix markdown formatting
+ Add nix derivation for static builds
+ Bump to v1.16.0-dev
+ version centos7 for compatible
+
## v1.15.0 (2020-06-17)
Bump github.com/containers/common from 0.12.0 to 0.13.1
Bump github.com/containers/storage from 1.20.1 to 1.20.2
diff --git a/vendor/github.com/containers/buildah/add.go b/vendor/github.com/containers/buildah/add.go
index 425621028..1c1f116da 100644
--- a/vendor/github.com/containers/buildah/add.go
+++ b/vendor/github.com/containers/buildah/add.go
@@ -1,21 +1,25 @@
package buildah
import (
+ "archive/tar"
+ "fmt"
"io"
+ "io/ioutil"
"net/http"
"net/url"
"os"
"path"
"path/filepath"
"strings"
+ "sync"
"syscall"
"time"
+ "github.com/containers/buildah/copier"
"github.com/containers/buildah/pkg/chrootuser"
- "github.com/containers/buildah/util"
- "github.com/containers/storage/pkg/archive"
"github.com/containers/storage/pkg/fileutils"
"github.com/containers/storage/pkg/idtools"
+ "github.com/hashicorp/go-multierror"
"github.com/opencontainers/runtime-spec/specs-go"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
@@ -25,17 +29,22 @@ import (
type AddAndCopyOptions struct {
// Chown is a spec for the user who should be given ownership over the
// newly-added content, potentially overriding permissions which would
- // otherwise match those of local files and directories being copied.
+ // otherwise be set to 0:0.
Chown string
+ // PreserveOwnership, if Chown is not set, tells us to avoid setting
+ // ownership of copied items to 0:0, instead using whatever ownership
+ // information is already set. Not meaningful for remote sources.
+ PreserveOwnership bool
// All of the data being copied will pass through Hasher, if set.
// If the sources are URLs or files, their contents will be passed to
// Hasher.
// If the sources include directory trees, Hasher will be passed
// tar-format archives of the directory trees.
Hasher io.Writer
- // Excludes is the contents of the .dockerignore file
+ // Excludes is the contents of the .dockerignore file.
Excludes []string
- // ContextDir is the base directory for Excludes for content being copied
+ // ContextDir is the base directory for content being copied and
+ // Excludes patterns.
ContextDir string
// ID mapping options to use when contents to be copied are part of
// another container, and need ownerships to be mapped from the host to
@@ -44,74 +53,93 @@ type AddAndCopyOptions struct {
// DryRun indicates that the content should be digested, but not actually
// copied into the container.
DryRun bool
+ // Clear the setuid bit on items being copied. Has no effect on
+ // archives being extracted, where the bit is always preserved.
+ StripSetuidBit bool
+ // Clear the setgid bit on items being copied. Has no effect on
+ // archives being extracted, where the bit is always preserved.
+ StripSetgidBit bool
+ // Clear the sticky bit on items being copied. Has no effect on
+ // archives being extracted, where the bit is always preserved.
+ StripStickyBit bool
}
-// addURL copies the contents of the source URL to the destination. This is
-// its own function so that deferred closes happen after we're done pulling
-// down each item of potentially many.
-func (b *Builder) addURL(destination, srcurl string, owner idtools.IDPair, hasher io.Writer, dryRun bool) error {
- resp, err := http.Get(srcurl)
+// sourceIsRemote returns true if "source" is a remote location.
+func sourceIsRemote(source string) bool {
+ return strings.HasPrefix(source, "http://") || strings.HasPrefix(source, "https://")
+}
+
+// getURL writes a tar archive containing the named content
+func getURL(src, mountpoint, renameTarget string, writer io.Writer) error {
+ url, err := url.Parse(src)
if err != nil {
- return errors.Wrapf(err, "error getting %q", srcurl)
+ return errors.Wrapf(err, "error parsing URL %q", url)
}
- defer resp.Body.Close()
-
- thisHasher := hasher
- if thisHasher != nil && b.ContentDigester.Hash() != nil {
- thisHasher = io.MultiWriter(thisHasher, b.ContentDigester.Hash())
+ response, err := http.Get(src)
+ if err != nil {
+ return errors.Wrapf(err, "error parsing URL %q", url)
}
- if thisHasher == nil {
- thisHasher = b.ContentDigester.Hash()
+ defer response.Body.Close()
+ // Figure out what to name the new content.
+ name := renameTarget
+ if name == "" {
+ name = path.Base(url.Path)
}
- thisWriter := thisHasher
-
- if !dryRun {
- logrus.Debugf("saving %q to %q", srcurl, destination)
- f, err := os.Create(destination)
+ // If there's a date on the content, use it. If not, use the Unix epoch
+ // for compatibility.
+ date := time.Unix(0, 0).UTC()
+ lastModified := response.Header.Get("Last-Modified")
+ if lastModified != "" {
+ d, err := time.Parse(time.RFC1123, lastModified)
if err != nil {
- return errors.Wrapf(err, "error creating %q", destination)
+ return errors.Wrapf(err, "error parsing last-modified time %q", lastModified)
}
+ date = d
+ }
+ // Figure out the size of the content.
+ size := response.ContentLength
+ responseBody := response.Body
+ if size < 0 {
+ // Create a temporary file and copy the content to it, so that
+ // we can figure out how much content there is.
+ f, err := ioutil.TempFile(mountpoint, "download")
+ if err != nil {
+ return errors.Wrapf(err, "error creating temporary file to hold %q", src)
+ }
+ defer os.Remove(f.Name())
defer f.Close()
- if err = f.Chown(owner.UID, owner.GID); err != nil {
- return errors.Wrapf(err, "error setting owner of %q to %d:%d", destination, owner.UID, owner.GID)
+ size, err = io.Copy(f, response.Body)
+ if err != nil {
+ return errors.Wrapf(err, "error writing %q to temporary file %q", src, f.Name())
}
- if last := resp.Header.Get("Last-Modified"); last != "" {
- if mtime, err2 := time.Parse(time.RFC1123, last); err2 != nil {
- logrus.Debugf("error parsing Last-Modified time %q: %v", last, err2)
- } else {
- defer func() {
- if err3 := os.Chtimes(destination, time.Now(), mtime); err3 != nil {
- logrus.Debugf("error setting mtime on %q to Last-Modified time %q: %v", destination, last, err3)
- }
- }()
- }
+ _, err = f.Seek(0, io.SeekStart)
+ if err != nil {
+ return errors.Wrapf(err, "error setting up to read %q from temporary file %q", src, f.Name())
}
- defer func() {
- if err2 := f.Chmod(0600); err2 != nil {
- logrus.Debugf("error setting permissions on %q: %v", destination, err2)
- }
- }()
- thisWriter = io.MultiWriter(f, thisWriter)
+ responseBody = f
}
-
- n, err := io.Copy(thisWriter, resp.Body)
- if err != nil {
- return errors.Wrapf(err, "error reading contents for %q from %q", destination, srcurl)
+ // Write the output archive. Set permissions for compatibility.
+ tw := tar.NewWriter(writer)
+ defer tw.Close()
+ hdr := tar.Header{
+ Typeflag: tar.TypeReg,
+ Name: name,
+ Size: size,
+ Mode: 0600,
+ ModTime: date,
}
- if resp.ContentLength >= 0 && n != resp.ContentLength {
- return errors.Errorf("error reading contents for %q from %q: wrong length (%d != %d)", destination, srcurl, n, resp.ContentLength)
+ err = tw.WriteHeader(&hdr)
+ if err != nil {
+ return errors.Wrapf(err, "error writing header")
}
- return nil
+ _, err = io.Copy(tw, responseBody)
+ return errors.Wrapf(err, "error writing content from %q to tar stream", src)
}
// Add copies the contents of the specified sources into the container's root
// filesystem, optionally extracting contents of local files that look like
// non-empty archives.
-func (b *Builder) Add(destination string, extract bool, options AddAndCopyOptions, source ...string) error {
- excludes, err := dockerIgnoreMatcher(options.Excludes, options.ContextDir)
- if err != nil {
- return err
- }
+func (b *Builder) Add(destination string, extract bool, options AddAndCopyOptions, sources ...string) error {
mountPoint, err := b.Mount(b.MountLabel)
if err != nil {
return err
@@ -121,267 +149,336 @@ func (b *Builder) Add(destination string, extract bool, options AddAndCopyOption
logrus.Errorf("error unmounting container: %v", err2)
}
}()
- // Find out which user (and group) the destination should belong to.
- user, _, err := b.user(mountPoint, options.Chown)
- if err != nil {
- return err
- }
- containerOwner := idtools.IDPair{UID: int(user.UID), GID: int(user.GID)}
- hostUID, hostGID, err := util.GetHostIDs(b.IDMappingOptions.UIDMap, b.IDMappingOptions.GIDMap, user.UID, user.GID)
- if err != nil {
- return err
+
+ contextDir := options.ContextDir
+ if contextDir == "" {
+ contextDir = string(os.PathSeparator)
}
- hostOwner := idtools.IDPair{UID: int(hostUID), GID: int(hostGID)}
- dest := mountPoint
- if !options.DryRun {
- // Resolve the destination if it was specified as a relative path.
- if destination != "" && filepath.IsAbs(destination) {
- dir := filepath.Dir(destination)
- if dir != "." && dir != "/" {
- if err = idtools.MkdirAllAndChownNew(filepath.Join(dest, dir), 0755, hostOwner); err != nil {
- return errors.Wrapf(err, "error creating directory %q", filepath.Join(dest, dir))
- }
- }
- dest = filepath.Join(dest, destination)
- } else {
- if err = idtools.MkdirAllAndChownNew(filepath.Join(dest, b.WorkDir()), 0755, hostOwner); err != nil {
- return errors.Wrapf(err, "error creating directory %q", filepath.Join(dest, b.WorkDir()))
- }
- dest = filepath.Join(dest, b.WorkDir(), destination)
+
+ // Figure out what sorts of sources we have.
+ var localSources, remoteSources []string
+ for _, src := range sources {
+ if sourceIsRemote(src) {
+ remoteSources = append(remoteSources, src)
+ continue
}
- // If the destination was explicitly marked as a directory by ending it
- // with a '/', create it so that we can be sure that it's a directory,
- // and any files we're copying will be placed in the directory.
- if len(destination) > 0 && destination[len(destination)-1] == os.PathSeparator {
- if err = idtools.MkdirAllAndChownNew(dest, 0755, hostOwner); err != nil {
- return errors.Wrapf(err, "error creating directory %q", dest)
- }
+ localSources = append(localSources, src)
+ }
+
+ // Check how many items our local source specs matched. Each spec
+ // should have matched at least one item, otherwise we consider it an
+ // error.
+ var localSourceStats []*copier.StatsForGlob
+ if len(localSources) > 0 {
+ statOptions := copier.StatOptions{
+ CheckForArchives: extract,
}
- // Make sure the destination's parent directory is usable.
- if destpfi, err2 := os.Stat(filepath.Dir(dest)); err2 == nil && !destpfi.IsDir() {
- return errors.Errorf("%q already exists, but is not a subdirectory)", filepath.Dir(dest))
+ localSourceStats, err = copier.Stat(contextDir, contextDir, statOptions, localSources)
+ if err != nil {
+ return errors.Wrapf(err, "error checking on sources %v under %q", localSources, contextDir)
}
}
- // Now look at the destination itself.
- destfi, err := os.Stat(dest)
- if err != nil {
- if !os.IsNotExist(err) {
- return errors.Wrapf(err, "couldn't determine what %q is", dest)
+ numLocalSourceItems := 0
+ for _, localSourceStat := range localSourceStats {
+ if localSourceStat.Error != "" {
+ errorText := localSourceStat.Error
+ rel, err := filepath.Rel(contextDir, localSourceStat.Glob)
+ if err != nil {
+ errorText = fmt.Sprintf("%v; %s", err, errorText)
+ }
+ if strings.HasPrefix(rel, ".."+string(os.PathSeparator)) {
+ errorText = fmt.Sprintf("possible escaping context directory error: %s", errorText)
+ }
+ return errors.Errorf("error checking on source %v under %q: %v", localSourceStat.Glob, contextDir, errorText)
}
- destfi = nil
- }
- if len(source) > 1 && (destfi == nil || !destfi.IsDir()) {
- return errors.Errorf("destination %q is not a directory", dest)
+ if len(localSourceStat.Globbed) == 0 {
+ return errors.Wrapf(syscall.ENOENT, "error checking on source %v under %q: no glob matches", localSourceStat.Glob, contextDir)
+ }
+ numLocalSourceItems += len(localSourceStat.Globbed)
}
- copyFileWithTar := b.copyFileWithTar(options.IDMappingOptions, &containerOwner, options.Hasher, options.DryRun)
- copyWithTar := b.copyWithTar(options.IDMappingOptions, &containerOwner, options.Hasher, options.DryRun)
- untarPath := b.untarPath(nil, options.Hasher, options.DryRun)
- err = b.addHelper(excludes, extract, dest, destfi, hostOwner, options, copyFileWithTar, copyWithTar, untarPath, source...)
- if err != nil {
- return err
+ if numLocalSourceItems+len(remoteSources) == 0 {
+ return errors.Wrapf(syscall.ENOENT, "no sources %v found", sources)
}
- return nil
-}
-// user returns the user (and group) information which the destination should belong to.
-func (b *Builder) user(mountPoint string, userspec string) (specs.User, string, error) {
- if userspec == "" {
- userspec = b.User()
+ // Find out which user (and group) the destination should belong to.
+ var chownDirs, chownFiles *idtools.IDPair
+ var chmodDirs, chmodFiles *os.FileMode
+ var user specs.User
+ if options.Chown != "" {
+ user, _, err = b.user(mountPoint, options.Chown)
+ if err != nil {
+ return errors.Wrapf(err, "error looking up UID/GID for %q", options.Chown)
+ }
+ }
+ chownDirs = &idtools.IDPair{UID: int(user.UID), GID: int(user.GID)}
+ chownFiles = &idtools.IDPair{UID: int(user.UID), GID: int(user.GID)}
+ if options.Chown == "" && options.PreserveOwnership {
+ chownDirs = nil
+ chownFiles = nil
}
- uid, gid, homeDir, err := chrootuser.GetUser(mountPoint, userspec)
- u := specs.User{
- UID: uid,
- GID: gid,
- Username: userspec,
+ // If we have a single source archive to extract, or more than one
+ // source item, or the destination has a path separator at the end of
+ // it, and it's not a remote URL, the destination needs to be a
+ // directory.
+ if destination == "" || !filepath.IsAbs(destination) {
+ tmpDestination := filepath.Join(string(os.PathSeparator)+b.WorkDir(), destination)
+ if destination == "" || strings.HasSuffix(destination, string(os.PathSeparator)) {
+ destination = tmpDestination + string(os.PathSeparator)
+ } else {
+ destination = tmpDestination
+ }
}
- if !strings.Contains(userspec, ":") {
- groups, err2 := chrootuser.GetAdditionalGroupsForUser(mountPoint, uint64(u.UID))
- if err2 != nil {
- if errors.Cause(err2) != chrootuser.ErrNoSuchUser && err == nil {
- err = err2
+ destMustBeDirectory := (len(sources) > 1) || strings.HasSuffix(destination, string(os.PathSeparator))
+ destCanBeFile := false
+ if len(sources) == 1 {
+ if len(remoteSources) == 1 {
+ destCanBeFile = sourceIsRemote(sources[0])
+ }
+ if len(localSources) == 1 {
+ item := localSourceStats[0].Results[localSourceStats[0].Globbed[0]]
+ if item.IsDir || (item.IsArchive && extract) {
+ destMustBeDirectory = true
+ }
+ if item.IsRegular {
+ destCanBeFile = true
}
- } else {
- u.AdditionalGids = groups
}
-
}
- return u, homeDir, err
-}
-// dockerIgnoreMatcher returns a matcher based on the contents of the .dockerignore file under contextDir
-func dockerIgnoreMatcher(lines []string, contextDir string) (*fileutils.PatternMatcher, error) {
- // if there's no context dir, there's no .dockerignore file to consult
- if contextDir == "" {
- return nil, nil
+ // We care if the destination either doesn't exist, or exists and is a
+ // file. If the source can be a single file, for those cases we treat
+ // the destination as a file rather than as a directory tree.
+ renameTarget := ""
+ extractDirectory := filepath.Join(mountPoint, destination)
+ statOptions := copier.StatOptions{
+ CheckForArchives: extract,
}
- // If there's no .dockerignore file, then we don't have to add a
- // pattern to tell copy logic to ignore it later.
- var patterns []string
- if _, err := os.Stat(filepath.Join(contextDir, ".dockerignore")); err == nil || !os.IsNotExist(err) {
- patterns = []string{".dockerignore"}
+ destStats, err := copier.Stat(mountPoint, filepath.Join(mountPoint, b.WorkDir()), statOptions, []string{extractDirectory})
+ if err != nil {
+ return errors.Wrapf(err, "error checking on destination %v", extractDirectory)
}
- for _, ignoreSpec := range lines {
- ignoreSpec = strings.TrimSpace(ignoreSpec)
- // ignore comments passed back from .dockerignore
- if ignoreSpec == "" || ignoreSpec[0] == '#' {
- continue
- }
- // if the spec starts with '!' it means the pattern
- // should be included. make a note so that we can move
- // it to the front of the updated pattern, and insert
- // the context dir's path in between
- includeFlag := ""
- if strings.HasPrefix(ignoreSpec, "!") {
- includeFlag = "!"
- ignoreSpec = ignoreSpec[1:]
- }
- if ignoreSpec == "" {
- continue
- }
- patterns = append(patterns, includeFlag+filepath.Join(contextDir, ignoreSpec))
+ if (len(destStats) == 0 || len(destStats[0].Globbed) == 0) && !destMustBeDirectory && destCanBeFile {
+ // destination doesn't exist - extract to parent and rename the incoming file to the destination's name
+ renameTarget = filepath.Base(extractDirectory)
+ extractDirectory = filepath.Dir(extractDirectory)
}
- // if there are no patterns, save time by not constructing the object
- if len(patterns) == 0 {
- return nil, nil
+ if len(destStats) == 1 && len(destStats[0].Globbed) == 1 && destStats[0].Results[destStats[0].Globbed[0]].IsRegular {
+ if destMustBeDirectory {
+ return errors.Errorf("destination %v already exists but is not a directory", destination)
+ }
+ // destination exists - it's a file, we need to extract to parent and rename the incoming file to the destination's name
+ renameTarget = filepath.Base(extractDirectory)
+ extractDirectory = filepath.Dir(extractDirectory)
}
- // return a matcher object
- matcher, err := fileutils.NewPatternMatcher(patterns)
+
+ pm, err := fileutils.NewPatternMatcher(options.Excludes)
if err != nil {
- return nil, errors.Wrapf(err, "error creating file matcher using patterns %v", patterns)
+ return errors.Wrapf(err, "error processing excludes list %v", options.Excludes)
}
- return matcher, nil
-}
-func (b *Builder) addHelper(excludes *fileutils.PatternMatcher, extract bool, dest string, destfi os.FileInfo, hostOwner idtools.IDPair, options AddAndCopyOptions, copyFileWithTar, copyWithTar, untarPath func(src, dest string) error, source ...string) error {
- for n, src := range source {
- if strings.HasPrefix(src, "http://") || strings.HasPrefix(src, "https://") {
- b.ContentDigester.Start("")
- // We assume that source is a file, and we're copying
- // it to the destination. If the destination is
- // already a directory, create a file inside of it.
- // Otherwise, the destination is the file to which
- // we'll save the contents.
- url, err := url.Parse(src)
- if err != nil {
- return errors.Wrapf(err, "error parsing URL %q", src)
+ // Copy each source in turn.
+ var srcUIDMap, srcGIDMap []idtools.IDMap
+ if options.IDMappingOptions != nil {
+ srcUIDMap, srcGIDMap = convertRuntimeIDMaps(options.IDMappingOptions.UIDMap, options.IDMappingOptions.GIDMap)
+ }
+ destUIDMap, destGIDMap := convertRuntimeIDMaps(b.IDMappingOptions.UIDMap, b.IDMappingOptions.GIDMap)
+
+ for _, src := range sources {
+ var multiErr *multierror.Error
+ var getErr, closeErr, renameErr, putErr error
+ var wg sync.WaitGroup
+ if sourceIsRemote(src) {
+ pipeReader, pipeWriter := io.Pipe()
+ wg.Add(1)
+ go func() {
+ getErr = getURL(src, mountPoint, renameTarget, pipeWriter)
+ pipeWriter.Close()
+ wg.Done()
+ }()
+ wg.Add(1)
+ go func() {
+ b.ContentDigester.Start("")
+ hashCloser := b.ContentDigester.Hash()
+ hasher := io.Writer(hashCloser)
+ if options.Hasher != nil {
+ hasher = io.MultiWriter(hasher, options.Hasher)
+ }
+ if options.DryRun {
+ _, putErr = io.Copy(hasher, pipeReader)
+ } else {
+ putOptions := copier.PutOptions{
+ UIDMap: destUIDMap,
+ GIDMap: destGIDMap,
+ ChownDirs: chownDirs,
+ ChmodDirs: chmodDirs,
+ ChownFiles: chownFiles,
+ ChmodFiles: chmodFiles,
+ }
+ putErr = copier.Put(mountPoint, extractDirectory, putOptions, io.TeeReader(pipeReader, hasher))
+ }
+ hashCloser.Close()
+ pipeReader.Close()
+ wg.Done()
+ }()
+ wg.Wait()
+ if getErr != nil {
+ getErr = errors.Wrapf(getErr, "error reading %q", src)
}
- d := dest
- if destfi != nil && destfi.IsDir() {
- d = filepath.Join(dest, path.Base(url.Path))
+ if putErr != nil {
+ putErr = errors.Wrapf(putErr, "error storing %q", src)
}
- if err = b.addURL(d, src, hostOwner, options.Hasher, options.DryRun); err != nil {
- return err
+ multiErr = multierror.Append(getErr, putErr)
+ if multiErr != nil && multiErr.ErrorOrNil() != nil {
+ if len(multiErr.Errors) > 1 {
+ return multiErr.ErrorOrNil()
+ }
+ return multiErr.Errors[0]
}
continue
}
- glob, err := filepath.Glob(src)
- if err != nil {
- return errors.Wrapf(err, "invalid glob %q", src)
+ // Dig out the result of running glob+stat on this source spec.
+ var localSourceStat *copier.StatsForGlob
+ for _, st := range localSourceStats {
+ if st.Glob == src {
+ localSourceStat = st
+ break
+ }
}
- if len(glob) == 0 {
- return errors.Wrapf(syscall.ENOENT, "no files found matching %q", src)
+ if localSourceStat == nil {
+ return errors.Errorf("internal error: should have statted %s, but we didn't?", src)
}
- for _, gsrc := range glob {
- esrc, err := filepath.EvalSymlinks(gsrc)
+ // Iterate through every item that matched the glob.
+ itemsCopied := 0
+ for _, glob := range localSourceStat.Globbed {
+ rel, err := filepath.Rel(contextDir, glob)
if err != nil {
- return errors.Wrapf(err, "error evaluating symlinks %q", gsrc)
+ return errors.Wrapf(err, "error computing path of %q", glob)
}
- srcfi, err := os.Stat(esrc)
- if err != nil {
- return errors.Wrapf(err, "error reading %q", esrc)
+ if strings.HasPrefix(rel, ".."+string(os.PathSeparator)) {
+ return errors.Errorf("possible escaping context directory error: %q is outside of %q", glob, contextDir)
}
- if srcfi.IsDir() {
- b.ContentDigester.Start("dir")
- // The source is a directory, so copy the contents of
- // the source directory into the target directory. Try
- // to create it first, so that if there's a problem,
- // we'll discover why that won't work.
- if !options.DryRun {
- if err = idtools.MkdirAllAndChownNew(dest, 0755, hostOwner); err != nil {
- return errors.Wrapf(err, "error creating directory %q", dest)
- }
+ // Check for dockerignore-style exclusion of this item.
+ if rel != "." {
+ matches, err := pm.Matches(filepath.ToSlash(rel)) // nolint:staticcheck
+ if err != nil {
+ return errors.Wrapf(err, "error checking if %q(%q) is excluded", glob, rel)
}
- logrus.Debugf("copying[%d] %q to %q", n, esrc+string(os.PathSeparator)+"*", dest+string(os.PathSeparator)+"*")
-
- // Copy the whole directory because we do not exclude anything
- if excludes == nil {
- if err = copyWithTar(esrc, dest); err != nil {
- return errors.Wrapf(err, "error copying %q to %q", esrc, dest)
- }
+ if matches {
continue
}
- err := filepath.Walk(esrc, func(path string, info os.FileInfo, err error) error {
- if err != nil {
- return err
- }
-
- res, err := excludes.MatchesResult(path)
- if err != nil {
- return errors.Wrapf(err, "error checking if %s is an excluded path", path)
- }
- // The latest match result has the highest priority,
- // which means that we only skip the filepath if
- // the last result matched.
- if res.IsMatched() {
- return nil
- }
-
- // combine the source's basename with the dest directory
- fpath, err := filepath.Rel(esrc, path)
- if err != nil {
- return errors.Wrapf(err, "error converting %s to a path relative to %s", path, esrc)
- }
- if err = copyFileWithTar(path, filepath.Join(dest, fpath)); err != nil {
- return errors.Wrapf(err, "error copying %q to %q", path, dest)
- }
- return nil
- })
- if err != nil {
- return err
- }
- continue
}
-
- // This source is a file
- // Check if the path matches the .dockerignore
- if excludes != nil {
- res, err := excludes.MatchesResult(esrc)
- if err != nil {
- return errors.Wrapf(err, "error checking if %s is an excluded path", esrc)
+ st := localSourceStat.Results[glob]
+ pipeReader, pipeWriter := io.Pipe()
+ wg.Add(1)
+ go func() {
+ renamedItems := 0
+ writer := io.WriteCloser(pipeWriter)
+ if renameTarget != "" {
+ writer = newTarFilterer(writer, func(hdr *tar.Header) (bool, bool, io.Reader) {
+ hdr.Name = renameTarget
+ renamedItems++
+ return false, false, nil
+ })
}
- // Skip the file if the pattern matches
- if res.IsMatched() {
- continue
+ getOptions := copier.GetOptions{
+ UIDMap: srcUIDMap,
+ GIDMap: srcGIDMap,
+ Excludes: options.Excludes,
+ ExpandArchives: extract,
+ StripSetuidBit: options.StripSetuidBit,
+ StripSetgidBit: options.StripSetgidBit,
+ StripStickyBit: options.StripStickyBit,
}
- }
-
- b.ContentDigester.Start("file")
-
- if !extract || !archive.IsArchivePath(esrc) {
- // This source is a file, and either it's not an
- // archive, or we don't care whether or not it's an
- // archive.
- d := dest
- if destfi != nil && destfi.IsDir() {
- d = filepath.Join(dest, filepath.Base(gsrc))
+ getErr = copier.Get(contextDir, contextDir, getOptions, []string{glob}, writer)
+ closeErr = writer.Close()
+ if renameTarget != "" && renamedItems > 1 {
+ renameErr = errors.Errorf("internal error: renamed %d items when we expected to only rename 1", renamedItems)
+ }
+ wg.Done()
+ }()
+ wg.Add(1)
+ go func() {
+ if st.IsDir {
+ b.ContentDigester.Start("dir")
+ } else {
+ b.ContentDigester.Start("file")
}
- // Copy the file, preserving attributes.
- logrus.Debugf("copying[%d] %q to %q", n, esrc, d)
- if err = copyFileWithTar(esrc, d); err != nil {
- return errors.Wrapf(err, "error copying %q to %q", esrc, d)
+ hashCloser := b.ContentDigester.Hash()
+ hasher := io.Writer(hashCloser)
+ if options.Hasher != nil {
+ hasher = io.MultiWriter(hasher, options.Hasher)
+ }
+ if options.DryRun {
+ _, putErr = io.Copy(hasher, pipeReader)
+ } else {
+ putOptions := copier.PutOptions{
+ UIDMap: destUIDMap,
+ GIDMap: destGIDMap,
+ ChownDirs: chownDirs,
+ ChmodDirs: chmodDirs,
+ ChownFiles: chownFiles,
+ ChmodFiles: chmodFiles,
+ }
+ putErr = copier.Put(mountPoint, extractDirectory, putOptions, io.TeeReader(pipeReader, hasher))
}
- continue
+ hashCloser.Close()
+ pipeReader.Close()
+ wg.Done()
+ }()
+ wg.Wait()
+ if getErr != nil {
+ getErr = errors.Wrapf(getErr, "error reading %q", src)
}
-
- // We're extracting an archive into the destination directory.
- logrus.Debugf("extracting contents[%d] of %q into %q", n, esrc, dest)
- if err = untarPath(esrc, dest); err != nil {
- return errors.Wrapf(err, "error extracting %q into %q", esrc, dest)
+ if closeErr != nil {
+ closeErr = errors.Wrapf(closeErr, "error closing %q", src)
+ }
+ if renameErr != nil {
+ renameErr = errors.Wrapf(renameErr, "error renaming %q", src)
+ }
+ if putErr != nil {
+ putErr = errors.Wrapf(putErr, "error storing %q", src)
+ }
+ multiErr = multierror.Append(getErr, closeErr, renameErr, putErr)
+ if multiErr != nil && multiErr.ErrorOrNil() != nil {
+ if len(multiErr.Errors) > 1 {
+ return multiErr.ErrorOrNil()
+ }
+ return multiErr.Errors[0]
}
+ itemsCopied++
+ }
+ if itemsCopied == 0 {
+ return errors.Wrapf(syscall.ENOENT, "no items matching glob %q copied (%d filtered)", localSourceStat.Glob, len(localSourceStat.Globbed))
}
}
return nil
}
+
+// user returns the user (and group) information which the destination should belong to.
+func (b *Builder) user(mountPoint string, userspec string) (specs.User, string, error) {
+ if userspec == "" {
+ userspec = b.User()
+ }
+
+ uid, gid, homeDir, err := chrootuser.GetUser(mountPoint, userspec)
+ u := specs.User{
+ UID: uid,
+ GID: gid,
+ Username: userspec,
+ }
+ if !strings.Contains(userspec, ":") {
+ groups, err2 := chrootuser.GetAdditionalGroupsForUser(mountPoint, uint64(u.UID))
+ if err2 != nil {
+ if errors.Cause(err2) != chrootuser.ErrNoSuchUser && err == nil {
+ err = err2
+ }
+ } else {
+ u.AdditionalGids = groups
+ }
+
+ }
+ return u, homeDir, err
+}
diff --git a/vendor/github.com/containers/buildah/buildah.go b/vendor/github.com/containers/buildah/buildah.go
index f5be7efbd..d001b8a10 100644
--- a/vendor/github.com/containers/buildah/buildah.go
+++ b/vendor/github.com/containers/buildah/buildah.go
@@ -28,7 +28,7 @@ const (
Package = "buildah"
// Version for the Package. Bump version in contrib/rpm/buildah.spec
// too.
- Version = "1.16.0-dev"
+ Version = "1.16.1"
// The value we use to identify what type of information, currently a
// serialized Builder structure, we are using as per-container state.
// This should only be changed when we make incompatible changes to
diff --git a/vendor/github.com/containers/buildah/changelog.txt b/vendor/github.com/containers/buildah/changelog.txt
index ab0fd2415..ec5db6eac 100644
--- a/vendor/github.com/containers/buildah/changelog.txt
+++ b/vendor/github.com/containers/buildah/changelog.txt
@@ -1,3 +1,116 @@
+
+- Changelog for v1.16.1 (2020-09-10)
+ * copier.Get(): hard link targets shouldn't be relative paths
+
+- Changelog for v1.16.0 (2020-09-03)
+ * fix build on 32bit arches
+ * containerImageRef.NewImageSource(): don't always force timestamps
+ * Add fuse module warning to image readme
+ * Heed our retry delay option values when retrying commit/pull/push
+ * Switch to containers/common for seccomp
+ * Use --timestamp rather then --omit-timestamp
+ * docs: remove outdated notice
+ * docs: remove outdated notice
+ * build-using-dockerfile: add a hidden --log-rusage flag
+ * build(deps): bump github.com/containers/image/v5 from 5.5.1 to 5.5.2
+ * Discard ReportWriter if user sets options.Quiet
+ * build(deps): bump github.com/containers/common from 0.19.0 to 0.20.3
+ * Fix ownership of content copied using COPY --from
+ * newTarDigester: zero out timestamps in tar headers
+ * Update nix pin with `make nixpkgs`
+ * bud.bats: correct .dockerignore integration tests
+ * Use pipes for copying
+ * run: include stdout in error message
+ * run: use the correct error for errors.Wrapf
+ * copier: un-export internal types
+ * copier: add Mkdir()
+ * in_podman: don't get tripped up by $CIRRUS_CHANGE_TITLE
+ * docs/buildah-commit.md: tweak some wording, add a --rm example
+ * imagebuildah: don’t blank out destination names when COPYing
+ * Replace retry functions with common/pkg/retry
+ * StageExecutor.historyMatches: compare timestamps using .Equal
+ * Update vendor of containers/common
+ * Fix errors found in coverity scan
+ * Change namespace handling flags to better match podman commands
+ * conformance testing: ignore buildah.BuilderIdentityAnnotation labels
+ * Vendor in containers/storage v1.23.0
+ * Add buildah.IsContainer interface
+ * Avoid feeding run_buildah to pipe
+ * fix(buildahimage): add xz dependency in buildah image
+ * Bump github.com/containers/common from 0.15.2 to 0.18.0
+ * Howto for rootless image building from OpenShift
+ * Add --omit-timestamp flag to buildah bud
+ * Update nix pin with `make nixpkgs`
+ * Shutdown storage on failures
+ * Handle COPY --from when an argument is used
+ * Bump github.com/seccomp/containers-golang from 0.5.0 to 0.6.0
+ * Cirrus: Use newly built VM images
+ * Bump github.com/opencontainers/runc from 1.0.0-rc91 to 1.0.0-rc92
+ * Enhance the .dockerignore man pages
+ * conformance: add a test for COPY from subdirectory
+ * fix bug manifest inspct
+ * Add documentation for .dockerignore
+ * Add BuilderIdentityAnnotation to identify buildah version
+ * DOC: Add quay.io/containers/buildah image to README.md
+ * Update buildahimages readme
+ * fix spelling mistake in "info" command result display
+ * Don't bind /etc/host and /etc/resolv.conf if network is not present
+ * blobcache: avoid an unnecessary NewImage()
+ * Build static binary with `buildGoModule`
+ * copier: split StripSetidBits into StripSetuidBit/StripSetgidBit/StripStickyBit
+ * tarFilterer: handle multiple archives
+ * Fix a race we hit during conformance tests
+ * Rework conformance testing
+ * Update 02-registries-repositories.md
+ * test-unit: invoke cmd/buildah tests with --flags
+ * parse: fix a type mismatch in a test
+ * Fix compilation of tests/testreport/testreport
+ * build.sh: log the version of Go that we're using
+ * test-unit: increase the test timeout to 40/45 minutes
+ * Add the "copier" package
+ * Fix & add notes regarding problematic language in codebase
+ * Add dependency on github.com/stretchr/testify/require
+ * CompositeDigester: add the ability to filter tar streams
+ * BATS tests: make more robust
+ * vendor golang.org/x/text@v0.3.3
+ * Switch golang 1.12 to golang 1.13
+ * imagebuildah: wait for stages that might not have even started yet
+ * chroot, run: not fail on bind mounts from /sys
+ * chroot: do not use setgroups if it is blocked
+ * Set engine env from containers.conf
+ * imagebuildah: return the right stage's image as the "final" image
+ * Fix a help string
+ * Deduplicate environment variables
+ * switch containers/libpod to containers/podman
+ * Bump github.com/containers/ocicrypt from 1.0.2 to 1.0.3
+ * Bump github.com/opencontainers/selinux from 1.5.2 to 1.6.0
+ * Mask out /sys/dev to prevent information leak
+ * linux: skip errors from the runtime kill
+ * Mask over the /sys/fs/selinux in mask branch
+ * Add VFS additional image store to container
+ * tests: add auth tests
+ * Allow "readonly" as alias to "ro" in mount options
+ * Ignore OS X specific consistency mount option
+ * Bump github.com/onsi/ginkgo from 1.13.0 to 1.14.0
+ * Bump github.com/containers/common from 0.14.0 to 0.15.2
+ * Rootless Buildah should default to IsolationOCIRootless
+ * imagebuildah: fix inheriting multi-stage builds
+ * Make imagebuildah.BuildOptions.Architecture/OS optional
+ * Make imagebuildah.BuildOptions.Jobs optional
+ * Resolve a possible race in imagebuildah.Executor.startStage()
+ * Switch scripts to use containers.conf
+ * Bump openshift/imagebuilder to v1.1.6
+ * Bump go.etcd.io/bbolt from 1.3.4 to 1.3.5
+ * buildah, bud: support --jobs=N for parallel execution
+ * executor: refactor build code inside new function
+ * Add bud regression tests
+ * Cirrus: Fix missing htpasswd in registry img
+ * docs: clarify the 'triples' format
+ * CHANGELOG.md: Fix markdown formatting
+ * Add nix derivation for static builds
+ * Bump to v1.16.0-dev
+ * add version centos7 for compatible
+
- Changelog for v1.15.0 (2020-06-17)
* Bump github.com/containers/common from 0.12.0 to 0.13.1
* Bump github.com/containers/storage from 1.20.1 to 1.20.2
diff --git a/vendor/github.com/containers/buildah/commit.go b/vendor/github.com/containers/buildah/commit.go
index 6c3febd5d..38601fbad 100644
--- a/vendor/github.com/containers/buildah/commit.go
+++ b/vendor/github.com/containers/buildah/commit.go
@@ -79,6 +79,7 @@ type CommitOptions struct {
EmptyLayer bool
// OmitTimestamp forces epoch 0 as created timestamp to allow for
// deterministic, content-addressable builds.
+	// Deprecated: use HistoryTimestamp instead.
OmitTimestamp bool
// SignBy is the fingerprint of a GPG key to use for signing the image.
SignBy string
@@ -231,6 +232,13 @@ func (b *Builder) Commit(ctx context.Context, dest types.ImageReference, options
// want to compute here because we'll have to do it again when
// cp.Image() instantiates a source image, and we don't want to do the
// work twice.
+ if options.OmitTimestamp {
+ if options.HistoryTimestamp != nil {
+ return imgID, nil, "", errors.Errorf("OmitTimestamp ahd HistoryTimestamp can not be used together")
+ }
+ timestamp := time.Unix(0, 0).UTC()
+ options.HistoryTimestamp = &timestamp
+ }
nameToRemove := ""
if dest == nil {
nameToRemove = stringid.GenerateRandomID() + "-tmp"
@@ -344,7 +352,7 @@ func (b *Builder) Commit(ctx context.Context, dest types.ImageReference, options
}
var manifestBytes []byte
- if manifestBytes, err = retryCopyImage(ctx, policyContext, maybeCachedDest, maybeCachedSrc, dest, "push", getCopyOptions(b.store, options.ReportWriter, nil, systemContext, "", false, options.SignBy, options.OciEncryptLayers, options.OciEncryptConfig, nil), options.MaxRetries, options.RetryDelay); err != nil {
+ if manifestBytes, err = retryCopyImage(ctx, policyContext, maybeCachedDest, maybeCachedSrc, dest, getCopyOptions(b.store, options.ReportWriter, nil, systemContext, "", false, options.SignBy, options.OciEncryptLayers, options.OciEncryptConfig, nil), options.MaxRetries, options.RetryDelay); err != nil {
return imgID, nil, "", errors.Wrapf(err, "error copying layers and metadata for container %q", b.ContainerID)
}
// If we've got more names to attach, and we know how to do that for
@@ -476,7 +484,7 @@ func Push(ctx context.Context, image string, dest types.ImageReference, options
systemContext.DirForceCompress = true
}
var manifestBytes []byte
- if manifestBytes, err = retryCopyImage(ctx, policyContext, dest, maybeCachedSrc, dest, "push", getCopyOptions(options.Store, options.ReportWriter, nil, systemContext, options.ManifestType, options.RemoveSignatures, options.SignBy, options.OciEncryptLayers, options.OciEncryptConfig, nil), options.MaxRetries, options.RetryDelay); err != nil {
+ if manifestBytes, err = retryCopyImage(ctx, policyContext, dest, maybeCachedSrc, dest, getCopyOptions(options.Store, options.ReportWriter, nil, systemContext, options.ManifestType, options.RemoveSignatures, options.SignBy, options.OciEncryptLayers, options.OciEncryptConfig, nil), options.MaxRetries, options.RetryDelay); err != nil {
return nil, "", errors.Wrapf(err, "error copying layers and metadata from %q to %q", transports.ImageName(maybeCachedSrc), transports.ImageName(dest))
}
if options.ReportWriter != nil {
diff --git a/vendor/github.com/containers/buildah/common.go b/vendor/github.com/containers/buildah/common.go
index b43cfffc9..594362300 100644
--- a/vendor/github.com/containers/buildah/common.go
+++ b/vendor/github.com/containers/buildah/common.go
@@ -3,13 +3,11 @@ package buildah
import (
"context"
"io"
- "net"
- "net/url"
"os"
"path/filepath"
- "syscall"
"time"
+ "github.com/containers/common/pkg/retry"
cp "github.com/containers/image/v5/copy"
"github.com/containers/image/v5/docker"
"github.com/containers/image/v5/signature"
@@ -17,11 +15,6 @@ import (
encconfig "github.com/containers/ocicrypt/config"
"github.com/containers/storage"
"github.com/containers/storage/pkg/unshare"
- "github.com/docker/distribution/registry/api/errcode"
- errcodev2 "github.com/docker/distribution/registry/api/v2"
- multierror "github.com/hashicorp/go-multierror"
- "github.com/pkg/errors"
- "github.com/sirupsen/logrus"
)
const (
@@ -76,64 +69,22 @@ func getSystemContext(store storage.Store, defaults *types.SystemContext, signat
return sc
}
-func isRetryable(err error) bool {
- err = errors.Cause(err)
- type unwrapper interface {
- Unwrap() error
- }
- if unwrapper, ok := err.(unwrapper); ok {
- err = unwrapper.Unwrap()
- return isRetryable(err)
- }
- if registryError, ok := err.(errcode.Error); ok {
- switch registryError.Code {
- case errcode.ErrorCodeUnauthorized, errcodev2.ErrorCodeNameUnknown, errcodev2.ErrorCodeManifestUnknown:
- return false
- }
- return true
- }
- if op, ok := err.(*net.OpError); ok {
- return isRetryable(op.Err)
- }
- if url, ok := err.(*url.Error); ok {
- return isRetryable(url.Err)
- }
- if errno, ok := err.(syscall.Errno); ok {
- if errno == syscall.ECONNREFUSED {
- return false
- }
- }
- if errs, ok := err.(errcode.Errors); ok {
- // if this error is a group of errors, process them all in turn
- for i := range errs {
- if !isRetryable(errs[i]) {
- return false
- }
- }
- }
- if errs, ok := err.(*multierror.Error); ok {
- // if this error is a group of errors, process them all in turn
- for i := range errs.Errors {
- if !isRetryable(errs.Errors[i]) {
- return false
- }
- }
- }
- return true
-}
-
-func retryCopyImage(ctx context.Context, policyContext *signature.PolicyContext, dest, src, registry types.ImageReference, action string, copyOptions *cp.Options, maxRetries int, retryDelay time.Duration) ([]byte, error) {
- manifestBytes, err := cp.Image(ctx, policyContext, dest, src, copyOptions)
- for retries := 0; err != nil && isRetryable(err) && registry != nil && registry.Transport().Name() == docker.Transport.Name() && retries < maxRetries; retries++ {
- if retryDelay == 0 {
- retryDelay = 5 * time.Second
- }
- logrus.Infof("Warning: %s failed, retrying in %s ... (%d/%d)", action, retryDelay, retries+1, maxRetries)
- time.Sleep(retryDelay)
+func retryCopyImage(ctx context.Context, policyContext *signature.PolicyContext, dest, src, registry types.ImageReference, copyOptions *cp.Options, maxRetries int, retryDelay time.Duration) ([]byte, error) {
+ var (
+ manifestBytes []byte
+ err error
+ lastErr error
+ )
+ err = retry.RetryIfNecessary(ctx, func() error {
manifestBytes, err = cp.Image(ctx, policyContext, dest, src, copyOptions)
- if err == nil {
- break
+ if registry != nil && registry.Transport().Name() != docker.Transport.Name() {
+ lastErr = err
+ return nil
}
+ return err
+ }, &retry.RetryOptions{MaxRetry: maxRetries, Delay: retryDelay})
+ if lastErr != nil {
+ err = lastErr
}
return manifestBytes, err
}
diff --git a/vendor/github.com/containers/buildah/copier/copier.go b/vendor/github.com/containers/buildah/copier/copier.go
new file mode 100644
index 000000000..a980fe292
--- /dev/null
+++ b/vendor/github.com/containers/buildah/copier/copier.go
@@ -0,0 +1,1526 @@
+package copier
+
+import (
+ "archive/tar"
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "strconv"
+ "strings"
+ "sync"
+ "syscall"
+ "time"
+
+ "github.com/containers/buildah/util"
+ "github.com/containers/image/v5/pkg/compression"
+ "github.com/containers/storage/pkg/fileutils"
+ "github.com/containers/storage/pkg/idtools"
+ "github.com/containers/storage/pkg/reexec"
+ "github.com/pkg/errors"
+ "github.com/sirupsen/logrus"
+)
+
+const (
+ copierCommand = "buildah-copier"
+ maxLoopsFollowed = 64
+ // See http://pubs.opengroup.org/onlinepubs/9699919799/utilities/pax.html#tag_20_92_13_06, from archive/tar
+ cISUID = 04000 // Set uid, from archive/tar
+ cISGID = 02000 // Set gid, from archive/tar
+ cISVTX = 01000 // Save text (sticky bit), from archive/tar
+)
+
+func init() {
+ reexec.Register(copierCommand, copierMain)
+}
+
+// isArchivePath returns true if the specified path can be read like a (possibly
+// compressed) tarball.
+func isArchivePath(path string) bool {
+ f, err := os.Open(path)
+ if err != nil {
+ return false
+ }
+ defer f.Close()
+ rc, _, err := compression.AutoDecompress(f)
+ if err != nil {
+ return false
+ }
+ defer rc.Close()
+ tr := tar.NewReader(rc)
+ _, err = tr.Next()
+ return err == nil
+}
+
+// requestType encodes exactly what kind of request this is.
+type requestType string
+
+const (
+ requestStat requestType = "STAT"
+ requestGet requestType = "GET"
+ requestPut requestType = "PUT"
+ requestMkdir requestType = "MKDIR"
+ requestQuit requestType = "QUIT"
+)
+
+// Request encodes a single request.
+type request struct {
+ Request requestType
+ Root string // used by all requests
+ preservedRoot string
+ rootPrefix string // used to reconstruct paths being handed back to the caller
+ Directory string // used by all requests
+ preservedDirectory string
+ Globs []string `json:",omitempty"` // used by stat, get
+ preservedGlobs []string
+ StatOptions StatOptions `json:",omitempty"`
+ GetOptions GetOptions `json:",omitempty"`
+ PutOptions PutOptions `json:",omitempty"`
+ MkdirOptions MkdirOptions `json:",omitempty"`
+}
+
+func (req *request) Excludes() []string {
+ switch req.Request {
+ case requestStat:
+ return req.StatOptions.Excludes
+ case requestGet:
+ return req.GetOptions.Excludes
+ case requestPut:
+ return nil
+ case requestMkdir:
+ return nil
+ case requestQuit:
+ return nil
+ default:
+ panic(fmt.Sprintf("not an implemented request type: %q", req.Request))
+ }
+}
+
+func (req *request) UIDMap() []idtools.IDMap {
+ switch req.Request {
+ case requestStat:
+ return nil
+ case requestGet:
+ return req.GetOptions.UIDMap
+ case requestPut:
+ return req.PutOptions.UIDMap
+ case requestMkdir:
+ return req.MkdirOptions.UIDMap
+ case requestQuit:
+ return nil
+ default:
+ panic(fmt.Sprintf("not an implemented request type: %q", req.Request))
+ }
+}
+
+func (req *request) GIDMap() []idtools.IDMap {
+ switch req.Request {
+ case requestStat:
+ return nil
+ case requestGet:
+ return req.GetOptions.GIDMap
+ case requestPut:
+ return req.PutOptions.GIDMap
+ case requestMkdir:
+ return req.MkdirOptions.GIDMap
+ case requestQuit:
+ return nil
+ default:
+ panic(fmt.Sprintf("not an implemented request type: %q", req.Request))
+ }
+}
+
+// Response encodes a single response.
+type response struct {
+ Error string `json:",omitempty"`
+ Stat statResponse
+ Get getResponse
+ Put putResponse
+ Mkdir mkdirResponse
+}
+
+// statResponse encodes a response for a single Stat request.
+type statResponse struct {
+ Globs []*StatsForGlob
+}
+
+// StatsForGlob encodes results for a single glob pattern passed to Stat().
+type StatsForGlob struct {
+ Error string `json:",omitempty"` // error if the Glob pattern was malformed
+ Glob string // input pattern to which this result corresponds
+ Globbed []string // a slice of zero or more names that match the glob
+ Results map[string]*StatForItem // one for each Globbed value if there are any, or for Glob
+}
+
+// StatForItem encodes results for a single filesystem item, as returned by Stat().
+type StatForItem struct {
+ Error string `json:",omitempty"`
+ Name string
+ Size int64 // dereferenced value for symlinks
+ Mode os.FileMode // dereferenced value for symlinks
+ ModTime time.Time // dereferenced value for symlinks
+ IsSymlink bool
+ IsDir bool // dereferenced value for symlinks
+ IsRegular bool // dereferenced value for symlinks
+ IsArchive bool // dereferenced value for symlinks
+ ImmediateTarget string `json:",omitempty"` // raw link content
+}
+
+// getResponse encodes a response for a single Get request.
+type getResponse struct {
+}
+
+// putResponse encodes a response for a single Put request.
+type putResponse struct {
+}
+
+// mkdirResponse encodes a response for a single Mkdir request.
+type mkdirResponse struct {
+}
+
+// StatOptions controls parts of Stat()'s behavior.
+type StatOptions struct {
+ CheckForArchives bool // check for and populate the IsArchive bit in returned values
+ Excludes []string // contents to pretend don't exist, using the OS-specific path separator
+}
+
+// Stat globs the specified pattern in the specified directory and returns its
+// results.
+// If root and directory are both not specified, the current root directory is
+// used, and relative names in the globs list are treated as being relative to
+// the current working directory.
+// If root is specified and the current OS supports it, the stat() is performed
+// in a chrooted context. If the directory is specified as an absolute path,
+// it should either be the root directory or a subdirectory of the root
+// directory. Otherwise, the directory is treated as a path relative to the
+// root directory.
+// Relative names in the glob list are treated as being relative to the
+// directory.
+func Stat(root string, directory string, options StatOptions, globs []string) ([]*StatsForGlob, error) {
+ req := request{
+ Request: requestStat,
+ Root: root,
+ Directory: directory,
+ Globs: append([]string{}, globs...),
+ StatOptions: options,
+ }
+ resp, err := copier(nil, nil, req)
+ if err != nil {
+ return nil, err
+ }
+ if resp.Error != "" {
+ return nil, errors.New(resp.Error)
+ }
+ return resp.Stat.Globs, nil
+}
+
+// GetOptions controls parts of Get()'s behavior.
+type GetOptions struct {
+ UIDMap, GIDMap []idtools.IDMap // map from hostIDs to containerIDs in the output archive
+ Excludes []string // contents to pretend don't exist, using the OS-specific path separator
+ ExpandArchives bool // extract the contents of named items that are archives
+ StripSetuidBit bool // strip the setuid bit off of items being copied. no effect on archives being extracted
+ StripSetgidBit bool // strip the setgid bit off of items being copied. no effect on archives being extracted
+ StripStickyBit bool // strip the sticky bit off of items being copied. no effect on archives being extracted
+ StripXattrs bool // don't record extended attributes of items being copied. no effect on archives being extracted
+ KeepDirectoryNames bool // don't strip the top directory's basename from the paths of items in subdirectories
+}
+
+// Get produces an archive containing items that match the specified glob
+// patterns and writes it to bulkWriter.
+// If root and directory are both not specified, the current root directory is
+// used, and relative names in the globs list are treated as being relative to
+// the current working directory.
+// If root is specified and the current OS supports it, the contents are read
+// in a chrooted context. If the directory is specified as an absolute path,
+// it should either be the root directory or a subdirectory of the root
+// directory. Otherwise, the directory is treated as a path relative to the
+// root directory.
+// Relative names in the glob list are treated as being relative to the
+// directory.
+func Get(root string, directory string, options GetOptions, globs []string, bulkWriter io.Writer) error {
+ req := request{
+ Request: requestGet,
+ Root: root,
+ Directory: directory,
+ Globs: append([]string{}, globs...),
+ StatOptions: StatOptions{
+ CheckForArchives: options.ExpandArchives,
+ },
+ GetOptions: options,
+ }
+ resp, err := copier(nil, bulkWriter, req)
+ if err != nil {
+ return err
+ }
+ if resp.Error != "" {
+ return errors.New(resp.Error)
+ }
+ return nil
+}
+
+// PutOptions controls parts of Put()'s behavior.
+type PutOptions struct {
+ UIDMap, GIDMap []idtools.IDMap // map from containerIDs to hostIDs when writing contents to disk
+ ChownDirs *idtools.IDPair // set ownership of newly-created directories
+ ChmodDirs *os.FileMode // set permissions on newly-created directories
+ ChownFiles *idtools.IDPair // set ownership of newly-created files
+ ChmodFiles *os.FileMode // set permissions on newly-created files
+ StripXattrs bool // don't bother trying to set extended attributes of items being copied
+ IgnoreXattrErrors bool // ignore any errors encountered when attempting to set extended attributes
+}
+
+// Put extracts an archive from the bulkReader at the specified directory.
+// If root and directory are both not specified, the current root directory is
+// used.
+// If root is specified and the current OS supports it, the contents are written
+// in a chrooted context. If the directory is specified as an absolute path,
+// it should either be the root directory or a subdirectory of the root
+// directory. Otherwise, the directory is treated as a path relative to the
+// root directory.
+func Put(root string, directory string, options PutOptions, bulkReader io.Reader) error {
+ req := request{
+ Request: requestPut,
+ Root: root,
+ Directory: directory,
+ PutOptions: options,
+ }
+ resp, err := copier(bulkReader, nil, req)
+ if err != nil {
+ return err
+ }
+ if resp.Error != "" {
+ return errors.New(resp.Error)
+ }
+ return nil
+}
+
+// MkdirOptions controls parts of Mkdir()'s behavior.
+type MkdirOptions struct {
+ UIDMap, GIDMap []idtools.IDMap // map from containerIDs to hostIDs when creating directories
+ ChownNew *idtools.IDPair // set ownership of newly-created directories
+ ChmodNew *os.FileMode // set permissions on newly-created directories
+}
+
+// Mkdir ensures that the specified directory exists. Any directories which
+// need to be created will be given the specified ownership and permissions.
+// If root and directory are both not specified, the current root directory is
+// used.
+// If root is specified and the current OS supports it, the directory is
+// created in a chrooted context. If the directory is specified as an absolute
+// path, it should either be the root directory or a subdirectory of the root
+// directory. Otherwise, the directory is treated as a path relative to the
+// root directory.
+func Mkdir(root string, directory string, options MkdirOptions) error {
+	// Bundle the arguments into a request and hand it to the copier
+	// backend, which may service it in a chrooted subprocess.
+	req := request{
+		Request:      requestMkdir,
+		Root:         root,
+		Directory:    directory,
+		MkdirOptions: options,
+	}
+	resp, err := copier(nil, nil, req)
+	if err != nil {
+		return err
+	}
+	// Handler-side failures are reported in-band in the response.
+	if resp.Error != "" {
+		return errors.New(resp.Error)
+	}
+	return nil
+}
+
+// cleanerReldirectory resolves relative path candidate lexically, attempting
+// to ensure that when joined as a subdirectory of another directory, it does
+// not reference anything outside of that other directory.
+func cleanerReldirectory(candidate string) string {
+	// Prepending a separator before Clean() forces any ".." components to
+	// resolve lexically against the (virtual) root; stripping the prefix
+	// afterwards leaves a scoped relative path.
+	cleaned := strings.TrimPrefix(filepath.Clean(string(os.PathSeparator)+candidate), string(os.PathSeparator))
+	if cleaned == "" {
+		return "."
+	}
+	return cleaned
+}
+
+// convertToRelSubdirectory returns the path of directory, bound and relative
+// to root, as a relative path, or an error if that path can't be computed or
+// if the two directories are on different volumes.
+func convertToRelSubdirectory(root, directory string) (relative string, err error) {
+	if root == "" || !filepath.IsAbs(root) {
+		return "", errors.Errorf("expected root directory to be an absolute path, got %q", root)
+	}
+	if directory == "" || !filepath.IsAbs(directory) {
+		// Report the offending value: the directory, not the root.
+		return "", errors.Errorf("expected directory to be an absolute path, got %q", directory)
+	}
+	if filepath.VolumeName(root) != filepath.VolumeName(directory) {
+		return "", errors.Errorf("%q and %q are on different volumes", root, directory)
+	}
+	rel, err := filepath.Rel(root, directory)
+	if err != nil {
+		return "", errors.Wrapf(err, "error computing path of %q relative to %q", directory, root)
+	}
+	// Scope the result so it cannot escape the root when re-joined.
+	return cleanerReldirectory(rel), nil
+}
+
+// currentVolumeRoot returns the root directory of the volume that holds the
+// current working directory (simply "/" on platforms without volume names).
+func currentVolumeRoot() (string, error) {
+	cwd, err := os.Getwd()
+	if err != nil {
+		return "", errors.Wrapf(err, "error getting current working directory")
+	}
+	return filepath.VolumeName(cwd) + string(os.PathSeparator), nil
+}
+
+// isVolumeRoot reports whether candidate, once made absolute, is the root
+// directory of its volume.
+func isVolumeRoot(candidate string) (bool, error) {
+	abs, err := filepath.Abs(candidate)
+	if err != nil {
+		return false, errors.Wrapf(err, "error converting %q to an absolute path", candidate)
+	}
+	return abs == filepath.VolumeName(abs)+string(os.PathSeparator), nil
+}
+
+// looksLikeAbs reports whether candidate looks like an absolute path: it
+// begins with exactly one path separator. An empty string is not absolute.
+func looksLikeAbs(candidate string) bool {
+	// Guard the index expression against an empty string, which would
+	// otherwise panic with an out-of-range error.
+	return len(candidate) > 0 && candidate[0] == os.PathSeparator && (len(candidate) == 1 || candidate[1] != os.PathSeparator)
+}
+
+// copier fills in default values for the request's root and directory,
+// sanity-checks that an absolute directory falls under the root, and then
+// dispatches the request either to a chrooting subprocess (when chroot is
+// available and would actually change anything) or to an in-process handler.
+func copier(bulkReader io.Reader, bulkWriter io.Writer, req request) (*response, error) {
+	if req.Directory == "" {
+		// Default the directory to the current directory, or to the
+		// root when one was given.
+		if req.Root == "" {
+			wd, err := os.Getwd()
+			if err != nil {
+				return nil, errors.Wrapf(err, "error getting current working directory")
+			}
+			req.Directory = wd
+		} else {
+			req.Directory = req.Root
+		}
+	}
+	if req.Root == "" {
+		root, err := currentVolumeRoot()
+		if err != nil {
+			return nil, errors.Wrapf(err, "error determining root of current volume")
+		}
+		req.Root = root
+	}
+	if filepath.IsAbs(req.Directory) {
+		// Reject absolute directories that don't live under the root.
+		_, err := convertToRelSubdirectory(req.Root, req.Directory)
+		if err != nil {
+			return nil, errors.Wrapf(err, "error rewriting %q to be relative to %q", req.Directory, req.Root)
+		}
+	}
+	isAlreadyRoot, err := isVolumeRoot(req.Root)
+	if err != nil {
+		return nil, errors.Wrapf(err, "error checking if %q is a root directory", req.Root)
+	}
+	// Chrooting buys nothing when the root already is the volume root.
+	if !isAlreadyRoot && canChroot {
+		return copierWithSubprocess(bulkReader, bulkWriter, req)
+	}
+	return copierWithoutSubprocess(bulkReader, bulkWriter, req)
+}
+
+// copierWithoutSubprocess services the request in this process, without
+// chrooting, after rewriting the directory and glob patterns to absolute
+// paths under the request's root.
+func copierWithoutSubprocess(bulkReader io.Reader, bulkWriter io.Writer, req request) (*response, error) {
+	// Preserve the caller's originals for use in result names.
+	req.preservedRoot = req.Root
+	req.rootPrefix = string(os.PathSeparator)
+	req.preservedDirectory = req.Directory
+	req.preservedGlobs = append([]string{}, req.Globs...)
+	if !filepath.IsAbs(req.Directory) {
+		req.Directory = filepath.Join(req.Root, cleanerReldirectory(req.Directory))
+	}
+	// Rewrite every glob to an absolute path scoped beneath the root.
+	absoluteGlobs := make([]string, 0, len(req.Globs))
+	for _, glob := range req.preservedGlobs {
+		if filepath.IsAbs(glob) {
+			relativeGlob, err := convertToRelSubdirectory(req.preservedRoot, glob)
+			if err != nil {
+				// Return the error to the caller instead of exiting
+				// the whole process: this runs in the caller's
+				// process, and os.Exit() here would be fatal to it.
+				return nil, errors.Wrapf(err, "error rewriting %q to be relative to %q", glob, req.preservedRoot)
+			}
+			absoluteGlobs = append(absoluteGlobs, filepath.Join(req.Root, string(os.PathSeparator)+relativeGlob))
+		} else {
+			absoluteGlobs = append(absoluteGlobs, filepath.Join(req.Directory, cleanerReldirectory(glob)))
+		}
+	}
+	req.Globs = absoluteGlobs
+	resp, cb, err := copierHandler(bulkReader, bulkWriter, req)
+	if err != nil {
+		return nil, err
+	}
+	// Run the bulk-transfer callback, if the handler provided one.
+	if cb != nil {
+		if err = cb(); err != nil {
+			return nil, err
+		}
+	}
+	return resp, nil
+}
+
+// closeIfNotNilYet closes *f if it hasn't already been closed-and-cleared,
+// and sets it to nil so that deferred and explicit closes don't collide.
+func closeIfNotNilYet(f **os.File, what string) {
+	if f != nil && *f != nil {
+		err := (*f).Close()
+		*f = nil
+		if err != nil {
+			// Best-effort cleanup; just log at debug level.
+			logrus.Debugf("error closing %s: %v", what, err)
+		}
+	}
+}
+
+// copierWithSubprocess re-executes the current binary as a helper child
+// process (via reexec), forwards the JSON-encoded request to it over stdin,
+// reads the JSON response from its stdout, and shuttles the bulk tar streams
+// over two extra pipes that the child sees as file descriptors 3 and 4.
+// Running the work in a child lets it chroot into req.Root without affecting
+// this process.
+func copierWithSubprocess(bulkReader io.Reader, bulkWriter io.Writer, req request) (resp *response, err error) {
+	// Substitute harmless defaults so the copy loops below always have
+	// something to read from / write to.
+	if bulkReader == nil {
+		bulkReader = bytes.NewReader([]byte{})
+	}
+	if bulkWriter == nil {
+		bulkWriter = ioutil.Discard
+	}
+	cmd := reexec.Command(copierCommand)
+	stdinRead, stdinWrite, err := os.Pipe()
+	if err != nil {
+		return nil, errors.Wrapf(err, "pipe")
+	}
+	// Each pipe end is closed exactly once: either explicitly below (which
+	// clears the pointer) or by these deferred cleanups on early return.
+	defer closeIfNotNilYet(&stdinRead, "stdin pipe reader")
+	defer closeIfNotNilYet(&stdinWrite, "stdin pipe writer")
+	encoder := json.NewEncoder(stdinWrite)
+	stdoutRead, stdoutWrite, err := os.Pipe()
+	if err != nil {
+		return nil, errors.Wrapf(err, "pipe")
+	}
+	defer closeIfNotNilYet(&stdoutRead, "stdout pipe reader")
+	defer closeIfNotNilYet(&stdoutWrite, "stdout pipe writer")
+	decoder := json.NewDecoder(stdoutRead)
+	bulkReaderRead, bulkReaderWrite, err := os.Pipe()
+	if err != nil {
+		return nil, errors.Wrapf(err, "pipe")
+	}
+	defer closeIfNotNilYet(&bulkReaderRead, "child bulk content reader pipe, read end")
+	defer closeIfNotNilYet(&bulkReaderWrite, "child bulk content reader pipe, write end")
+	bulkWriterRead, bulkWriterWrite, err := os.Pipe()
+	if err != nil {
+		return nil, errors.Wrapf(err, "pipe")
+	}
+	defer closeIfNotNilYet(&bulkWriterRead, "child bulk content writer pipe, read end")
+	defer closeIfNotNilYet(&bulkWriterWrite, "child bulk content writer pipe, write end")
+	cmd.Dir = "/"
+	// Propagate our log level to the child; copierMain() reads LOGLEVEL.
+	cmd.Env = append([]string{fmt.Sprintf("LOGLEVEL=%d", logrus.GetLevel())}, os.Environ()...)
+
+	errorBuffer := bytes.Buffer{}
+	cmd.Stdin = stdinRead
+	cmd.Stdout = stdoutWrite
+	cmd.Stderr = &errorBuffer
+	// The child sees these as fd 3 (bulk input) and fd 4 (bulk output).
+	cmd.ExtraFiles = []*os.File{bulkReaderRead, bulkWriterWrite}
+	if err = cmd.Start(); err != nil {
+		return nil, errors.Wrapf(err, "error starting subprocess")
+	}
+	// Make sure the child is reaped even on early-error returns; cleared
+	// once we Wait() for it explicitly further down.
+	cmdToWaitFor := cmd
+	defer func() {
+		if cmdToWaitFor != nil {
+			if err := cmdToWaitFor.Wait(); err != nil {
+				if errorBuffer.String() != "" {
+					logrus.Debug(errorBuffer.String())
+				}
+			}
+		}
+	}()
+	// Close our copies of the child's ends of the pipes so EOF propagates.
+	stdinRead.Close()
+	stdinRead = nil
+	stdoutWrite.Close()
+	stdoutWrite = nil
+	bulkReaderRead.Close()
+	bulkReaderRead = nil
+	bulkWriterWrite.Close()
+	bulkWriterWrite = nil
+	killAndReturn := func(err error, step string) (*response, error) { // nolint: unparam
+		if err2 := cmd.Process.Kill(); err2 != nil {
+			return nil, errors.Wrapf(err, "error killing subprocess: %v; %s", err2, step)
+		}
+		return nil, errors.Wrap(err, step)
+	}
+	// Send the request, read the response, then tell the child to exit.
+	if err = encoder.Encode(req); err != nil {
+		return killAndReturn(err, "error encoding request")
+	}
+	if err = decoder.Decode(&resp); err != nil {
+		return killAndReturn(err, "error decoding response")
+	}
+	if err = encoder.Encode(&request{Request: requestQuit}); err != nil {
+		return killAndReturn(err, "error encoding request")
+	}
+	stdinWrite.Close()
+	stdinWrite = nil
+	stdoutRead.Close()
+	stdoutRead = nil
+	// Pump the bulk streams in both directions until both are drained.
+	var wg sync.WaitGroup
+	var readError, writeError error
+	wg.Add(1)
+	go func() {
+		_, writeError = io.Copy(bulkWriter, bulkWriterRead)
+		bulkWriterRead.Close()
+		bulkWriterRead = nil
+		wg.Done()
+	}()
+	wg.Add(1)
+	go func() {
+		_, readError = io.Copy(bulkReaderWrite, bulkReader)
+		bulkReaderWrite.Close()
+		bulkReaderWrite = nil
+		wg.Done()
+	}()
+	wg.Wait()
+	// We Wait() for the child ourselves now; disable the deferred reaper.
+	cmdToWaitFor = nil
+	if err = cmd.Wait(); err != nil {
+		// Prefer the child's stderr text when it said anything useful.
+		if errorBuffer.String() != "" {
+			err = fmt.Errorf("%s", errorBuffer.String())
+		}
+		return nil, err
+	}
+	if cmd.ProcessState.Exited() && !cmd.ProcessState.Success() {
+		err = fmt.Errorf("subprocess exited with error")
+		if errorBuffer.String() != "" {
+			err = fmt.Errorf("%s", errorBuffer.String())
+		}
+		return nil, err
+	}
+	if readError != nil {
+		return nil, errors.Wrapf(readError, "error passing bulk input to subprocess")
+	}
+	if writeError != nil {
+		return nil, errors.Wrapf(writeError, "error passing bulk output from subprocess")
+	}
+	return resp, nil
+}
+
+// copierMain is the entry point of the reexec helper child process. It reads
+// JSON requests from stdin, chroots into the requested root when possible,
+// services each request via copierHandler(), and writes JSON responses to
+// stdout. Bulk tar content travels over file descriptors 3 (input) and 4
+// (output), which the parent set up in copierWithSubprocess().
+func copierMain() {
+	var chrooted bool
+	decoder := json.NewDecoder(os.Stdin)
+	encoder := json.NewEncoder(os.Stdout)
+	previousRequestRoot := ""
+
+	// Set logging.
+	if level := os.Getenv("LOGLEVEL"); level != "" {
+		if ll, err := strconv.Atoi(level); err == nil {
+			logrus.SetLevel(logrus.Level(ll))
+		}
+	}
+
+	// Set up descriptors for receiving and sending tarstreams.
+	bulkReader := os.NewFile(3, "bulk-reader")
+	bulkWriter := os.NewFile(4, "bulk-writer")
+
+	for {
+		// Read a request.
+		req := new(request)
+		if err := decoder.Decode(req); err != nil {
+			fmt.Fprintf(os.Stderr, "error decoding request: %v", err)
+			os.Exit(1)
+		}
+		if req.Request == requestQuit {
+			// Making Quit a specific request means that we could
+			// run Stat() at a caller's behest before using the
+			// same process for Get() or Put(). Maybe later.
+			break
+		}
+
+		// Multiple requests should list the same root, because we
+		// can't un-chroot to chroot to some other location.
+		// NOTE(review): previousRequestRoot is only assigned inside
+		// this branch, which is reached only when it is already
+		// non-empty; as written it stays "" after the first request,
+		// so the same-root check never fires — confirm whether the
+		// assignment was meant to happen after the else branch too.
+		if previousRequestRoot != "" {
+			// Check that we got the same input value for
+			// where-to-chroot-to.
+			if req.Root != previousRequestRoot {
+				fmt.Fprintf(os.Stderr, "error: can't change location of chroot from %q to %q", previousRequestRoot, req.Root)
+				os.Exit(1)
+			}
+			previousRequestRoot = req.Root
+		} else {
+			// Figure out where to chroot to, if we weren't told.
+			if req.Root == "" {
+				root, err := currentVolumeRoot()
+				if err != nil {
+					fmt.Fprintf(os.Stderr, "error determining root of current volume: %v", err)
+					os.Exit(1)
+				}
+				req.Root = root
+			}
+			// Change to the specified root directory.
+			var err error
+			chrooted, err = chroot(req.Root)
+			if err != nil {
+				fmt.Fprintf(os.Stderr, "error changing to intended-new-root directory %q: %v", req.Root, err)
+				os.Exit(1)
+			}
+		}
+
+		// Preserve the caller's originals for use in result names.
+		req.preservedRoot = req.Root
+		req.rootPrefix = string(os.PathSeparator)
+		req.preservedDirectory = req.Directory
+		req.preservedGlobs = append([]string{}, req.Globs...)
+		if chrooted {
+			// We'll need to adjust some things now that the root
+			// directory isn't what it was. Make the directory and
+			// globs absolute paths for simplicity's sake.
+			absoluteDirectory := req.Directory
+			if !filepath.IsAbs(req.Directory) {
+				absoluteDirectory = filepath.Join(req.Root, cleanerReldirectory(req.Directory))
+			}
+			relativeDirectory, err := convertToRelSubdirectory(req.preservedRoot, absoluteDirectory)
+			if err != nil {
+				fmt.Fprintf(os.Stderr, "error rewriting %q to be relative to %q: %v", absoluteDirectory, req.preservedRoot, err)
+				os.Exit(1)
+			}
+			req.Directory = filepath.Clean(string(os.PathSeparator) + relativeDirectory)
+			absoluteGlobs := make([]string, 0, len(req.Globs))
+			for i, glob := range req.preservedGlobs {
+				if filepath.IsAbs(glob) {
+					relativeGlob, err := convertToRelSubdirectory(req.preservedRoot, glob)
+					if err != nil {
+						fmt.Fprintf(os.Stderr, "error rewriting %q to be relative to %q: %v", glob, req.preservedRoot, err)
+						os.Exit(1)
+					}
+					absoluteGlobs = append(absoluteGlobs, filepath.Clean(string(os.PathSeparator)+relativeGlob))
+				} else {
+					absoluteGlobs = append(absoluteGlobs, filepath.Join(req.Directory, cleanerReldirectory(req.Globs[i])))
+				}
+			}
+			req.Globs = absoluteGlobs
+			// Inside the chroot, paths are relative to the new "/";
+			// keep the old root as a prefix for reporting names.
+			req.rootPrefix = req.Root
+			req.Root = string(os.PathSeparator)
+		} else {
+			// Make the directory and globs absolute paths for
+			// simplicity's sake.
+			if !filepath.IsAbs(req.Directory) {
+				req.Directory = filepath.Join(req.Root, cleanerReldirectory(req.Directory))
+			}
+			absoluteGlobs := make([]string, 0, len(req.Globs))
+			for i, glob := range req.preservedGlobs {
+				if filepath.IsAbs(glob) {
+					absoluteGlobs = append(absoluteGlobs, req.Globs[i])
+				} else {
+					absoluteGlobs = append(absoluteGlobs, filepath.Join(req.Directory, cleanerReldirectory(req.Globs[i])))
+				}
+			}
+			req.Globs = absoluteGlobs
+		}
+		resp, cb, err := copierHandler(bulkReader, bulkWriter, *req)
+		if err != nil {
+			fmt.Fprintf(os.Stderr, "error handling request %#v: %v", *req, err)
+			os.Exit(1)
+		}
+		// Encode the response.
+		if err := encoder.Encode(resp); err != nil {
+			fmt.Fprintf(os.Stderr, "error encoding response %#v: %v", *req, err)
+			os.Exit(1)
+		}
+		// If there's bulk data to transfer, run the callback to either
+		// read or write it.
+		if cb != nil {
+			if err = cb(); err != nil {
+				fmt.Fprintf(os.Stderr, "error during bulk transfer for %#v: %v", *req, err)
+				os.Exit(1)
+			}
+		}
+	}
+}
+
+// copierHandler dispatches a single request to the matching handler, after
+// building the exclusion pattern matcher and the optional UID/GID mappings
+// that the handlers share. It returns the response, an optional callback
+// that performs the bulk transfer, and any dispatch error.
+func copierHandler(bulkReader io.Reader, bulkWriter io.Writer, req request) (*response, func() error, error) {
+	// NewPatternMatcher splits patterns into components using
+	// os.PathSeparator, implying that it expects OS-specific naming
+	// conventions.
+	excludes := req.Excludes()
+	pm, err := fileutils.NewPatternMatcher(excludes)
+	if err != nil {
+		return nil, nil, errors.Wrapf(err, "error processing excludes list %v", excludes)
+	}
+
+	// Only construct mappings when both halves were supplied.
+	var idMappings *idtools.IDMappings
+	uidMap, gidMap := req.UIDMap(), req.GIDMap()
+	if len(uidMap) > 0 && len(gidMap) > 0 {
+		idMappings = idtools.NewIDMappingsFromMaps(uidMap, gidMap)
+	}
+
+	switch req.Request {
+	default:
+		return nil, nil, errors.Errorf("not an implemented request type: %q", req.Request)
+	case requestStat:
+		resp := copierHandlerStat(req, pm)
+		return resp, nil, nil
+	case requestGet:
+		return copierHandlerGet(bulkWriter, req, pm, idMappings)
+	case requestPut:
+		return copierHandlerPut(bulkReader, req, idMappings)
+	case requestMkdir:
+		return copierHandlerMkdir(req, idMappings)
+	case requestQuit:
+		return nil, nil, nil
+	}
+}
+
+// pathIsExcluded computes path relative to root, then asks the pattern matcher
+// if the result is excluded. Returns the relative path and the matcher's
+// results.
+func pathIsExcluded(root, path string, pm *fileutils.PatternMatcher) (string, bool, error) {
+	rel, err := convertToRelSubdirectory(root, path)
+	if err != nil {
+		return "", false, errors.Wrapf(err, "copier: error computing path of %q relative to root %q", path, root)
+	}
+	// With no matcher, nothing is excluded.
+	if pm == nil {
+		return rel, false, nil
+	}
+	if rel == "." {
+		// special case: the root itself is never excluded
+		return rel, false, nil
+	}
+	// Matches uses filepath.FromSlash() to convert candidates before
+	// checking if they match the patterns it's been given, implying that
+	// it expects Unix-style paths.
+	matches, err := pm.Matches(filepath.ToSlash(rel)) // nolint:staticcheck
+	if err != nil {
+		return rel, false, errors.Wrapf(err, "copier: error checking if %q is excluded", rel)
+	}
+	if matches {
+		return rel, true, nil
+	}
+	return rel, false, nil
+}
+
+// resolvePath resolves symbolic links in paths, treating the specified
+// directory as the root.
+// Resolving the path this way, and using the result, is in no way secure
+// against another process manipulating the content that we're looking at, and
+// it is not expected to be.
+// This helps us approximate chrooted behavior on systems and in test cases
+// where chroot isn't available.
+func resolvePath(root, path string, pm *fileutils.PatternMatcher) (string, error) {
+	rel, err := convertToRelSubdirectory(root, path)
+	if err != nil {
+		return "", errors.Errorf("error making path %q relative to %q", path, root)
+	}
+	workingPath := root
+	followed := 0
+	components := strings.Split(rel, string(os.PathSeparator))
+	excluded := false
+	// Walk the path one component at a time, expanding symlinks as we go.
+	for len(components) > 0 {
+		// if anything we try to examine is excluded, then resolution has to "break"
+		_, thisExcluded, err := pathIsExcluded(root, filepath.Join(workingPath, components[0]), pm)
+		if err != nil {
+			return "", err
+		}
+		excluded = excluded || thisExcluded
+		if !excluded {
+			if target, err := os.Readlink(filepath.Join(workingPath, components[0])); err == nil {
+				// The component is a symlink; bound how many
+				// links we're willing to follow.
+				followed++
+				if followed > maxLoopsFollowed {
+					return "", &os.PathError{
+						Op:   "open",
+						Path: path,
+						Err:  syscall.ELOOP,
+					}
+				}
+				if filepath.IsAbs(target) || looksLikeAbs(target) {
+					// symlink to an absolute path - prepend the
+					// root directory to that absolute path to
+					// replace the current location, and resolve
+					// the remaining components
+					workingPath = root
+					components = append(strings.Split(target, string(os.PathSeparator)), components[1:]...)
+					continue
+				}
+				// symlink to a relative path - add the link target to
+				// the current location to get the next location, and
+				// resolve the remaining components
+				rel, err := convertToRelSubdirectory(root, filepath.Join(workingPath, target))
+				if err != nil {
+					return "", errors.Errorf("error making path %q relative to %q", filepath.Join(workingPath, target), root)
+				}
+				workingPath = root
+				components = append(strings.Split(filepath.Clean(string(os.PathSeparator)+rel), string(os.PathSeparator)), components[1:]...)
+				continue
+			}
+		}
+		// append the current component's name to get the next location
+		workingPath = filepath.Join(workingPath, components[0])
+		if workingPath == filepath.Join(root, "..") {
+			// attempted to go above the root using a relative path .., scope it
+			workingPath = root
+		}
+		// ready to handle the next component
+		components = components[1:]
+	}
+	return workingPath, nil
+}
+
+// copierHandlerStat services a Stat request: it expands each requested glob
+// pattern, skips excluded matches, and returns lstat-style information about
+// every remaining match, following symlinks just enough to describe their
+// targets. Per-glob and per-item failures are reported in-band rather than
+// aborting the whole request.
+func copierHandlerStat(req request, pm *fileutils.PatternMatcher) *response {
+	errorResponse := func(fmtspec string, args ...interface{}) *response {
+		return &response{Error: fmt.Sprintf(fmtspec, args...), Stat: statResponse{}}
+	}
+	if len(req.Globs) == 0 {
+		return errorResponse("copier: stat: expected at least one glob pattern, got none")
+	}
+	var stats []*StatsForGlob
+	for i, glob := range req.Globs {
+		// Report results under the glob the caller originally passed.
+		s := StatsForGlob{
+			Glob: req.preservedGlobs[i],
+		}
+		stats = append(stats, &s)
+		// glob this pattern
+		globMatched, err := filepath.Glob(glob)
+		if err != nil {
+			s.Error = fmt.Sprintf("copier: stat: %q while matching glob pattern %q", err.Error(), glob)
+			continue
+		}
+		// collect the matches
+		s.Globbed = make([]string, 0, len(globMatched))
+		s.Results = make(map[string]*StatForItem)
+		for _, globbed := range globMatched {
+			rel, excluded, err := pathIsExcluded(req.Root, globbed, pm)
+			if err != nil {
+				return errorResponse("copier: stat: %v", err)
+			}
+			if excluded {
+				continue
+			}
+			// if the glob was an absolute path, reconstruct the
+			// path that we should hand back for the match
+			var resultName string
+			if filepath.IsAbs(req.preservedGlobs[i]) {
+				resultName = filepath.Join(req.rootPrefix, globbed)
+			} else {
+				relResult := rel
+				if req.Directory != req.Root {
+					relResult, err = convertToRelSubdirectory(req.Directory, globbed)
+					if err != nil {
+						return errorResponse("copier: stat: error making %q relative to %q: %v", globbed, req.Directory, err)
+					}
+				}
+				resultName = relResult
+			}
+			result := StatForItem{Name: resultName}
+			s.Globbed = append(s.Globbed, resultName)
+			s.Results[resultName] = &result
+			// lstat the matched value
+			linfo, err := os.Lstat(globbed)
+			if err != nil {
+				result.Error = err.Error()
+				continue
+			}
+			result.Size = linfo.Size()
+			result.Mode = linfo.Mode()
+			result.ModTime = linfo.ModTime()
+			result.IsDir = linfo.IsDir()
+			result.IsRegular = result.Mode.IsRegular()
+			result.IsSymlink = (linfo.Mode() & os.ModeType) == os.ModeSymlink
+			checkForArchive := req.StatOptions.CheckForArchives
+			if result.IsSymlink {
+				// if the match was a symbolic link, read it
+				immediateTarget, err := os.Readlink(globbed)
+				if err != nil {
+					result.Error = err.Error()
+					continue
+				}
+				// record where it points, both by itself (it
+				// could be a relative link) and in the context
+				// of the chroot
+				result.ImmediateTarget = immediateTarget
+				resolvedTarget, err := resolvePath(req.Root, globbed, pm)
+				if err != nil {
+					return errorResponse("copier: stat: error resolving %q: %v", globbed, err)
+				}
+				// lstat the thing that we point to
+				info, err := os.Lstat(resolvedTarget)
+				if err != nil {
+					result.Error = err.Error()
+					continue
+				}
+				// replace IsArchive/IsDir/IsRegular with info about the target
+				if info.Mode().IsRegular() && req.StatOptions.CheckForArchives {
+					result.IsArchive = isArchivePath(resolvedTarget)
+					checkForArchive = false
+				}
+				result.IsDir = info.IsDir()
+				result.IsRegular = info.Mode().IsRegular()
+			}
+			if result.IsRegular && checkForArchive {
+				// we were asked to check on this, and it
+				// wasn't a symlink, in which case we'd have
+				// already checked what the link points to
+				result.IsArchive = isArchivePath(globbed)
+			}
+		}
+		// no unskipped matches -> error
+		if len(s.Globbed) == 0 {
+			s.Globbed = nil
+			s.Results = nil
+			s.Error = fmt.Sprintf("copier: stat: %q: %v", glob, syscall.ENOENT)
+		}
+	}
+	return &response{Stat: statResponse{Globs: stats}}
+}
+
+// copierHandlerGet services a Get request: it expands the requested glob
+// patterns, drops excluded matches, and returns a callback that writes the
+// surviving items to bulkWriter as a tar stream, recursing into directories
+// and dereferencing top-level symlinks.
+func copierHandlerGet(bulkWriter io.Writer, req request, pm *fileutils.PatternMatcher, idMappings *idtools.IDMappings) (*response, func() error, error) {
+	// Run a Stat for the same patterns so the response carries stat
+	// results alongside the Get results.
+	statRequest := req
+	statRequest.Request = requestStat
+	statResponse := copierHandlerStat(statRequest, pm)
+	errorResponse := func(fmtspec string, args ...interface{}) (*response, func() error, error) {
+		return &response{Error: fmt.Sprintf(fmtspec, args...), Stat: statResponse.Stat, Get: getResponse{}}, nil, nil
+	}
+	if statResponse.Error != "" {
+		return errorResponse("%s", statResponse.Error)
+	}
+	if len(req.Globs) == 0 {
+		return errorResponse("copier: get: expected at least one glob pattern, got 0")
+	}
+	// build a queue of items by globbing
+	var queue []string
+	globMatchedCount := 0
+	for _, glob := range req.Globs {
+		globMatched, err := filepath.Glob(glob)
+		if err != nil {
+			return errorResponse("copier: get: glob %q: %v", glob, err)
+		}
+		globMatchedCount += len(globMatched)
+		filtered := make([]string, 0, len(globMatched))
+		for _, globbed := range globMatched {
+			rel, excluded, err := pathIsExcluded(req.Root, globbed, pm)
+			if err != nil {
+				return errorResponse("copier: get: checking if %q is excluded: %v", globbed, err)
+			}
+			if rel == "." || !excluded {
+				filtered = append(filtered, globbed)
+			}
+		}
+		if len(filtered) == 0 {
+			return errorResponse("copier: get: glob %q matched nothing (%d filtered out of %v): %v", glob, len(globMatched), globMatched, syscall.ENOENT)
+		}
+		queue = append(queue, filtered...)
+	}
+	// no matches -> error
+	if len(queue) == 0 {
+		return errorResponse("copier: get: globs %v matched nothing (%d filtered out): %v", req.Globs, globMatchedCount, syscall.ENOENT)
+	}
+	cb := func() error {
+		tw := tar.NewWriter(bulkWriter)
+		defer tw.Close()
+		hardlinkChecker := new(util.HardlinkChecker)
+		itemsCopied := 0
+		for i, item := range queue {
+			// if we're not discarding the names of individual directories, keep track of this one
+			relNamePrefix := ""
+			if req.GetOptions.KeepDirectoryNames {
+				relNamePrefix = filepath.Base(item)
+			}
+			// if the named thing-to-read is a symlink, dereference it
+			info, err := os.Lstat(item)
+			if err != nil {
+				return errors.Wrapf(err, "copier: get: lstat %q", item)
+			}
+			// chase links. if we hit a dead end, we should just fail
+			followedLinks := 0
+			const maxFollowedLinks = 16
+			for info.Mode()&os.ModeType == os.ModeSymlink && followedLinks < maxFollowedLinks {
+				path, err := os.Readlink(item)
+				if err != nil {
+					// Fail instead of continuing: neither info nor
+					// followedLinks would change, so a "continue"
+					// here would re-evaluate the same condition and
+					// loop forever.
+					return errors.Wrapf(err, "copier: get: readlink %q(%q)", queue[i], item)
+				}
+				if filepath.IsAbs(path) || looksLikeAbs(path) {
+					path = filepath.Join(req.Root, path)
+				} else {
+					path = filepath.Join(filepath.Dir(item), path)
+				}
+				item = path
+				if _, err = convertToRelSubdirectory(req.Root, item); err != nil {
+					return errors.Wrapf(err, "copier: get: computing path of %q(%q) relative to %q", queue[i], item, req.Root)
+				}
+				if info, err = os.Lstat(item); err != nil {
+					return errors.Wrapf(err, "copier: get: lstat %q(%q)", queue[i], item)
+				}
+				followedLinks++
+			}
+			if followedLinks >= maxFollowedLinks {
+				return errors.Wrapf(syscall.ELOOP, "copier: get: resolving symlink %q(%q)", queue[i], item)
+			}
+			// evaluate excludes relative to the root directory
+			if info.Mode().IsDir() {
+				walkfn := func(path string, info os.FileInfo, err error) error {
+					// compute the path of this item
+					// relative to the top-level directory,
+					// for the tar header
+					rel, relErr := convertToRelSubdirectory(item, path)
+					if relErr != nil {
+						return errors.Wrapf(relErr, "copier: get: error computing path of %q relative to top directory %q", path, item)
+					}
+					if err != nil {
+						return errors.Wrapf(err, "copier: get: error reading %q", path)
+					}
+					// prefix the original item's name if we're keeping it
+					if relNamePrefix != "" {
+						rel = filepath.Join(relNamePrefix, rel)
+					}
+					if rel == "" || rel == "." {
+						// skip the "." entry
+						return nil
+					}
+					_, skip, err := pathIsExcluded(req.Root, path, pm)
+					if err != nil {
+						return err
+					}
+					if skip {
+						// don't use filepath.SkipDir
+						// here, since a more specific
+						// but-include-this for
+						// something under it might
+						// also be in the excludes list
+						return nil
+					}
+					// if it's a symlink, read its target
+					symlinkTarget := ""
+					if info.Mode()&os.ModeType == os.ModeSymlink {
+						target, err := os.Readlink(path)
+						if err != nil {
+							return errors.Wrapf(err, "copier: get: readlink(%q(%q))", rel, path)
+						}
+						symlinkTarget = target
+					}
+					// add the item to the outgoing tar stream
+					return copierHandlerGetOne(info, symlinkTarget, rel, path, req.GetOptions, tw, hardlinkChecker, idMappings)
+				}
+				// walk the directory tree, checking/adding items individually
+				if err := filepath.Walk(item, walkfn); err != nil {
+					return errors.Wrapf(err, "copier: get: %q(%q)", queue[i], item)
+				}
+				itemsCopied++
+			} else {
+				_, skip, err := pathIsExcluded(req.Root, item, pm)
+				if err != nil {
+					return err
+				}
+				if skip {
+					continue
+				}
+				// add the item to the outgoing tar stream. in
+				// cases where this was a symlink that we
+				// dereferenced, be sure to use the name of the
+				// link.
+				if err := copierHandlerGetOne(info, "", filepath.Base(queue[i]), item, req.GetOptions, tw, hardlinkChecker, idMappings); err != nil {
+					return errors.Wrapf(err, "copier: get: %q", queue[i])
+				}
+				itemsCopied++
+			}
+		}
+		if itemsCopied == 0 {
+			return errors.New("copier: get: copied no items")
+		}
+		return nil
+	}
+	return &response{Stat: statResponse.Stat, Get: getResponse{}}, cb, nil
+}
+
+// copierHandlerGetOne writes one item to the outgoing tar stream: it builds a
+// header from srcfi under the supplied name, applies the permission-stripping
+// and xattr options, optionally expands archives in-line, converts repeated
+// hard links into TypeLink entries, maps ownership into the container's ID
+// space, and finally emits the header and (for regular files) the content.
+func copierHandlerGetOne(srcfi os.FileInfo, symlinkTarget, name, contentPath string, options GetOptions, tw *tar.Writer, hardlinkChecker *util.HardlinkChecker, idMappings *idtools.IDMappings) error {
+	// build the header using the name provided
+	hdr, err := tar.FileInfoHeader(srcfi, symlinkTarget)
+	if err != nil {
+		return errors.Wrapf(err, "error generating tar header for %s (%s)", contentPath, symlinkTarget)
+	}
+	if name != "" {
+		hdr.Name = filepath.ToSlash(name)
+	}
+	if options.StripSetuidBit {
+		hdr.Mode &^= cISUID
+	}
+	if options.StripSetgidBit {
+		hdr.Mode &^= cISGID
+	}
+	if options.StripStickyBit {
+		hdr.Mode &^= cISVTX
+	}
+	// read extended attributes
+	var xattrs map[string]string
+	if !options.StripXattrs {
+		xattrs, err = Lgetxattrs(contentPath)
+		if err != nil {
+			return errors.Wrapf(err, "error getting extended attributes for %q", contentPath)
+		}
+	}
+	hdr.Xattrs = xattrs // nolint:staticcheck
+	if hdr.Typeflag == tar.TypeReg {
+		// if it's an archive and we're extracting archives, read the
+		// file and spool out its contents in-line. (if we just
+		// inlined the whole file, we'd also be inlining the EOF marker
+		// it contains)
+		if options.ExpandArchives && isArchivePath(contentPath) {
+			f, err := os.Open(contentPath)
+			if err != nil {
+				return errors.Wrapf(err, "error opening %s", contentPath)
+			}
+			defer f.Close()
+			rc, _, err := compression.AutoDecompress(f)
+			if err != nil {
+				return errors.Wrapf(err, "error decompressing %s", contentPath)
+			}
+			defer rc.Close()
+			tr := tar.NewReader(rc)
+			hdr, err := tr.Next()
+			for err == nil {
+				if err = tw.WriteHeader(hdr); err != nil {
+					return errors.Wrapf(err, "error writing tar header from %q to pipe", contentPath)
+				}
+				if hdr.Size != 0 {
+					n, err := io.Copy(tw, tr)
+					if err != nil {
+						return errors.Wrapf(err, "error extracting content from archive %s: %s", contentPath, hdr.Name)
+					}
+					if n != hdr.Size {
+						return errors.Errorf("error extracting contents of archive %s: incorrect length for %q", contentPath, hdr.Name)
+					}
+					tw.Flush()
+				}
+				hdr, err = tr.Next()
+			}
+			// tar.Reader ends with io.EOF on success; anything else
+			// means the archive was truncated or corrupt.
+			if err != io.EOF {
+				return errors.Wrapf(err, "error extracting contents of archive %s", contentPath)
+			}
+			return nil
+		}
+		// if this regular file is hard linked to something else we've
+		// already added, set up to output a TypeLink entry instead of
+		// a TypeReg entry
+		target := hardlinkChecker.Check(srcfi)
+		if target != "" {
+			hdr.Typeflag = tar.TypeLink
+			hdr.Linkname = filepath.ToSlash(target)
+			hdr.Size = 0
+		} else {
+			// note the device/inode pair for this file
+			hardlinkChecker.Add(srcfi, name)
+		}
+	}
+	// map the ownership for the archive
+	if idMappings != nil && !idMappings.Empty() {
+		hostPair := idtools.IDPair{UID: hdr.Uid, GID: hdr.Gid}
+		hdr.Uid, hdr.Gid, err = idMappings.ToContainer(hostPair)
+		if err != nil {
+			return errors.Wrapf(err, "error mapping host filesystem owners %#v to container filesystem owners", hostPair)
+		}
+	}
+	// output the header
+	if err = tw.WriteHeader(hdr); err != nil {
+		return errors.Wrapf(err, "error writing header for %s (%s)", contentPath, hdr.Name)
+	}
+	if hdr.Typeflag == tar.TypeReg {
+		// output the content
+		f, err := os.Open(contentPath)
+		if err != nil {
+			return errors.Wrapf(err, "error opening %s", contentPath)
+		}
+		defer f.Close()
+		n, err := io.Copy(tw, f)
+		if err != nil {
+			return errors.Wrapf(err, "error copying %s", contentPath)
+		}
+		if n != hdr.Size {
+			// The "expected" value is the header's size; "read" is
+			// what io.Copy actually transferred.
+			return errors.Errorf("error copying %s: incorrect size (expected %d bytes, read %d bytes)", contentPath, hdr.Size, n)
+		}
+		tw.Flush()
+	}
+	return nil
+}
+
+func copierHandlerPut(bulkReader io.Reader, req request, idMappings *idtools.IDMappings) (*response, func() error, error) {
+ errorResponse := func(fmtspec string, args ...interface{}) (*response, func() error, error) {
+ return &response{Error: fmt.Sprintf(fmtspec, args...), Put: putResponse{}}, nil, nil
+ }
+ dirUID, dirGID := 0, 0
+ if req.PutOptions.ChownDirs != nil {
+ dirUID, dirGID = req.PutOptions.ChownDirs.UID, req.PutOptions.ChownDirs.GID
+ }
+ dirMode := os.FileMode(0755)
+ if req.PutOptions.ChmodDirs != nil {
+ dirMode = *req.PutOptions.ChmodDirs
+ }
+ var fileUID, fileGID *int
+ if req.PutOptions.ChownFiles != nil {
+ fileUID, fileGID = &req.PutOptions.ChownFiles.UID, &req.PutOptions.ChownFiles.GID
+ }
+ if idMappings != nil && !idMappings.Empty() {
+ containerDirPair := idtools.IDPair{UID: dirUID, GID: dirGID}
+ hostDirPair, err := idMappings.ToHost(containerDirPair)
+ if err != nil {
+ return errorResponse("copier: put: error mapping container filesystem owner %d:%d to host filesystem owners: %v", dirUID, dirGID, err)
+ }
+ dirUID, dirGID = hostDirPair.UID, hostDirPair.GID
+ if req.PutOptions.ChownFiles != nil {
+ containerFilePair := idtools.IDPair{UID: *fileUID, GID: *fileGID}
+ hostFilePair, err := idMappings.ToHost(containerFilePair)
+ if err != nil {
+ return errorResponse("copier: put: error mapping container filesystem owner %d:%d to host filesystem owners: %v", fileUID, fileGID, err)
+ }
+ fileUID, fileGID = &hostFilePair.UID, &hostFilePair.GID
+ }
+ }
+ ensureDirectoryUnderRoot := func(directory string) error {
+ rel, err := convertToRelSubdirectory(req.Root, directory)
+ if err != nil {
+ return errors.Wrapf(err, "%q is not a subdirectory of %q", directory, req.Root)
+ }
+ subdir := ""
+ for _, component := range strings.Split(rel, string(os.PathSeparator)) {
+ subdir = filepath.Join(subdir, component)
+ path := filepath.Join(req.Root, subdir)
+ if err := os.Mkdir(path, 0700); err == nil {
+ if err = lchown(path, dirUID, dirGID); err != nil {
+ return errors.Wrapf(err, "copier: put: error setting owner of %q to %d:%d", path, dirUID, dirGID)
+ }
+ if err = os.Chmod(path, dirMode); err != nil {
+ return errors.Wrapf(err, "copier: put: error setting permissions on %q to 0%o", path, dirMode)
+ }
+ } else {
+ if !os.IsExist(err) {
+ return errors.Wrapf(err, "copier: put: error checking directory %q", path)
+ }
+ }
+ }
+ return nil
+ }
+ createFile := func(path string, tr *tar.Reader) (int64, error) {
+ f, err := os.OpenFile(path, os.O_CREATE|os.O_WRONLY|os.O_TRUNC|os.O_EXCL, 0600)
+ if err != nil && os.IsExist(err) {
+ if err = os.Remove(path); err != nil {
+ return 0, errors.Wrapf(err, "copier: put: error removing file to be overwritten %q", path)
+ }
+ f, err = os.OpenFile(path, os.O_CREATE|os.O_WRONLY|os.O_TRUNC|os.O_EXCL, 0600)
+ }
+ if err != nil {
+ return 0, errors.Wrapf(err, "copier: put: error opening file %q for writing", path)
+ }
+ defer f.Close()
+ n, err := io.Copy(f, tr)
+ if err != nil {
+ return n, errors.Wrapf(err, "copier: put: error writing file %q", path)
+ }
+ return n, nil
+ }
+ targetDirectory, err := resolvePath(req.Root, req.Directory, nil)
+ if err != nil {
+ return errorResponse("copier: put: error resolving %q: %v", req.Directory, err)
+ }
+ info, err := os.Lstat(targetDirectory)
+ if err == nil {
+ if !info.IsDir() {
+ return errorResponse("copier: put: %s (%s): exists but is not a directory", req.Directory, targetDirectory)
+ }
+ } else {
+ if !os.IsNotExist(err) {
+ return errorResponse("copier: put: %s: %v", req.Directory, err)
+ }
+ if err := ensureDirectoryUnderRoot(req.Directory); err != nil {
+ return errorResponse("copier: put: %v", err)
+ }
+ }
+ cb := func() error {
+ type directoryAndTimes struct {
+ directory string
+ atime, mtime time.Time
+ }
+ var directoriesAndTimes []directoryAndTimes
+ defer func() {
+ for i := range directoriesAndTimes {
+ directoryAndTimes := directoriesAndTimes[len(directoriesAndTimes)-i-1]
+ if err := lutimes(false, directoryAndTimes.directory, directoryAndTimes.atime, directoryAndTimes.mtime); err != nil {
+ logrus.Debugf("error setting access and modify timestamps on %q to %s and %s: %v", directoryAndTimes.directory, directoryAndTimes.atime, directoryAndTimes.mtime, err)
+ }
+ }
+ }()
+ tr := tar.NewReader(bulkReader)
+ hdr, err := tr.Next()
+ for err == nil {
+ // figure out who should own this new item
+ if idMappings != nil && !idMappings.Empty() {
+ containerPair := idtools.IDPair{UID: hdr.Uid, GID: hdr.Gid}
+ hostPair, err := idMappings.ToHost(containerPair)
+ if err != nil {
+ return errors.Wrapf(err, "error mapping container filesystem owner 0,0 to host filesystem owners")
+ }
+ hdr.Uid, hdr.Gid = hostPair.UID, hostPair.GID
+ }
+ if hdr.Typeflag == tar.TypeDir {
+ if req.PutOptions.ChownDirs != nil {
+ hdr.Uid, hdr.Gid = dirUID, dirGID
+ }
+ } else {
+ if req.PutOptions.ChownFiles != nil {
+ hdr.Uid, hdr.Gid = *fileUID, *fileGID
+ }
+ }
+ // make sure the parent directory exists
+ path := filepath.Join(targetDirectory, cleanerReldirectory(filepath.FromSlash(hdr.Name)))
+ if err := ensureDirectoryUnderRoot(filepath.Dir(path)); err != nil {
+ return err
+ }
+ // figure out what the permissions should be
+ if hdr.Typeflag == tar.TypeDir {
+ if req.PutOptions.ChmodDirs != nil {
+ hdr.Mode = int64(*req.PutOptions.ChmodDirs)
+ }
+ } else {
+ if req.PutOptions.ChmodFiles != nil {
+ hdr.Mode = int64(*req.PutOptions.ChmodFiles)
+ }
+ }
+ // create the new item
+ devMajor := uint32(hdr.Devmajor)
+ devMinor := uint32(hdr.Devminor)
+ switch hdr.Typeflag {
+ // no type flag for sockets
+ default:
+ return errors.Errorf("unrecognized Typeflag %c", hdr.Typeflag)
+ case tar.TypeReg, tar.TypeRegA:
+ var written int64
+ written, err = createFile(path, tr)
+ if written != hdr.Size {
+ return errors.Errorf("copier: put: error creating %q: incorrect length (%d != %d)", path, written, hdr.Size)
+ }
+ case tar.TypeLink:
+ var linkTarget string
+ if linkTarget, err = resolvePath(targetDirectory, filepath.Join(req.Root, filepath.FromSlash(hdr.Linkname)), nil); err != nil {
+ return errors.Errorf("error resolving hardlink target path %q under root %q", hdr.Linkname, req.Root)
+ }
+ if err = os.Link(linkTarget, path); err != nil && os.IsExist(err) {
+ if err = os.Remove(path); err == nil {
+ err = os.Link(linkTarget, path)
+ }
+ }
+ case tar.TypeSymlink:
+ if err = os.Symlink(filepath.FromSlash(hdr.Linkname), filepath.FromSlash(path)); err != nil && os.IsExist(err) {
+ if err = os.Remove(path); err == nil {
+ err = os.Symlink(filepath.FromSlash(hdr.Linkname), filepath.FromSlash(path))
+ }
+ }
+ case tar.TypeChar:
+ if err = mknod(path, chrMode(0600), int(mkdev(devMajor, devMinor))); err != nil && os.IsExist(err) {
+ if err = os.Remove(path); err == nil {
+ err = mknod(path, chrMode(0600), int(mkdev(devMajor, devMinor)))
+ }
+ }
+ case tar.TypeBlock:
+ if err = mknod(path, blkMode(0600), int(mkdev(devMajor, devMinor))); err != nil && os.IsExist(err) {
+ if err = os.Remove(path); err == nil {
+ err = mknod(path, blkMode(0600), int(mkdev(devMajor, devMinor)))
+ }
+ }
+ case tar.TypeDir:
+ if err = os.Mkdir(path, 0700); err != nil && os.IsExist(err) {
+ err = nil
+ }
+ // make a note of the directory's times. we
+ // might create items under it, which will
+ // cause the mtime to change after we correct
+ // it, so we'll need to correct it again later
+ directoriesAndTimes = append(directoriesAndTimes, directoryAndTimes{
+ directory: path,
+ atime: hdr.AccessTime,
+ mtime: hdr.ModTime,
+ })
+ case tar.TypeFifo:
+ if err = mkfifo(path, 0600); err != nil && os.IsExist(err) {
+ if err = os.Remove(path); err == nil {
+ err = mkfifo(path, 0600)
+ }
+ }
+ }
+ // check for errors
+ if err != nil {
+ return errors.Wrapf(err, "copier: put: error creating %q", path)
+ }
+ // restore xattrs
+ if !req.PutOptions.StripXattrs {
+ if err = Lsetxattrs(path, hdr.Xattrs); err != nil { // nolint:staticcheck
+ if !req.PutOptions.IgnoreXattrErrors {
+ return errors.Wrapf(err, "copier: put: error setting extended attributes on %q", path)
+ }
+ }
+ }
+ // set ownership
+ if err = lchown(path, hdr.Uid, hdr.Gid); err != nil {
+ return errors.Wrapf(err, "copier: put: error setting ownership of %q to %d:%d", path, hdr.Uid, hdr.Gid)
+ }
+ // set permissions, except for symlinks, since we don't have lchmod
+ mode := os.FileMode(hdr.Mode) & os.ModePerm
+ if hdr.Typeflag != tar.TypeSymlink {
+ if err = os.Chmod(path, mode); err != nil {
+ return errors.Wrapf(err, "copier: put: error setting permissions on %q to 0%o", path, mode)
+ }
+ }
+ // set other bits that might have been reset by chown()
+ if hdr.Typeflag != tar.TypeSymlink {
+ if hdr.Mode&cISUID == cISUID {
+ mode |= syscall.S_ISUID
+ }
+ if hdr.Mode&cISGID == cISGID {
+ mode |= syscall.S_ISGID
+ }
+ if hdr.Mode&cISVTX == cISVTX {
+ mode |= syscall.S_ISVTX
+ }
+ if err = syscall.Chmod(path, uint32(mode)); err != nil {
+ return errors.Wrapf(err, "error setting additional permissions on %q to 0%o", path, mode)
+ }
+ }
+ // set time
+ if hdr.AccessTime.IsZero() || hdr.AccessTime.Before(hdr.ModTime) {
+ hdr.AccessTime = hdr.ModTime
+ }
+ if err = lutimes(hdr.Typeflag == tar.TypeSymlink, path, hdr.AccessTime, hdr.ModTime); err != nil {
+ return errors.Wrapf(err, "error setting access and modify timestamps on %q to %s and %s", path, hdr.AccessTime, hdr.ModTime)
+ }
+ hdr, err = tr.Next()
+ }
+ if err != io.EOF {
+ return errors.Wrapf(err, "error reading tar stream: expected EOF")
+ }
+ return nil
+ }
+ return &response{Error: "", Put: putResponse{}}, cb, nil
+}
+
+func copierHandlerMkdir(req request, idMappings *idtools.IDMappings) (*response, func() error, error) {
+ errorResponse := func(fmtspec string, args ...interface{}) (*response, func() error, error) {
+ return &response{Error: fmt.Sprintf(fmtspec, args...), Mkdir: mkdirResponse{}}, nil, nil
+ }
+ dirUID, dirGID := 0, 0
+ if req.MkdirOptions.ChownNew != nil {
+ dirUID, dirGID = req.MkdirOptions.ChownNew.UID, req.MkdirOptions.ChownNew.GID
+ }
+ dirMode := os.FileMode(0755)
+ if req.MkdirOptions.ChmodNew != nil {
+ dirMode = *req.MkdirOptions.ChmodNew
+ }
+ if idMappings != nil && !idMappings.Empty() {
+ containerDirPair := idtools.IDPair{UID: dirUID, GID: dirGID}
+ hostDirPair, err := idMappings.ToHost(containerDirPair)
+ if err != nil {
+ return errorResponse("copier: mkdir: error mapping container filesystem owner %d:%d to host filesystem owners: %v", dirUID, dirGID, err)
+ }
+ dirUID, dirGID = hostDirPair.UID, hostDirPair.GID
+ }
+
+ directory, err := resolvePath(req.Root, req.Directory, nil)
+ if err != nil {
+ return errorResponse("copier: mkdir: error resolving %q: %v", req.Directory, err)
+ }
+
+ rel, err := convertToRelSubdirectory(req.Root, directory)
+ if err != nil {
+ return errorResponse("copier: mkdir: error computing path of %q relative to %q: %v", directory, req.Root, err)
+ }
+
+ subdir := ""
+ for _, component := range strings.Split(rel, string(os.PathSeparator)) {
+ subdir = filepath.Join(subdir, component)
+ path := filepath.Join(req.Root, subdir)
+ if err := os.Mkdir(path, 0700); err == nil {
+ if err = chown(path, dirUID, dirGID); err != nil {
+ return errorResponse("copier: mkdir: error setting owner of %q to %d:%d: %v", path, dirUID, dirGID, err)
+ }
+ if err = chmod(path, dirMode); err != nil {
+ return errorResponse("copier: mkdir: error setting permissions on %q to 0%o: %v", path, dirMode)
+ }
+ } else {
+ if !os.IsExist(err) {
+ return errorResponse("copier: mkdir: error checking directory %q: %v", path, err)
+ }
+ }
+ }
+
+ return &response{Error: "", Mkdir: mkdirResponse{}}, nil, nil
+}
diff --git a/vendor/github.com/containers/buildah/copier/syscall_unix.go b/vendor/github.com/containers/buildah/copier/syscall_unix.go
new file mode 100644
index 000000000..55f2f368a
--- /dev/null
+++ b/vendor/github.com/containers/buildah/copier/syscall_unix.go
@@ -0,0 +1,79 @@
+// +build !windows
+
+package copier
+
+import (
+ "fmt"
+ "os"
+ "time"
+
+ "golang.org/x/sys/unix"
+)
+
+var canChroot = true
+
+func chroot(root string) (bool, error) {
+ if canChroot {
+ if err := os.Chdir(root); err != nil {
+ return false, fmt.Errorf("error changing to intended-new-root directory %q: %v", root, err)
+ }
+ if err := unix.Chroot(root); err != nil {
+ return false, fmt.Errorf("error chrooting to directory %q: %v", root, err)
+ }
+ if err := os.Chdir(string(os.PathSeparator)); err != nil {
+ return false, fmt.Errorf("error changing to just-became-root directory %q: %v", root, err)
+ }
+ return true, nil
+ }
+ return false, nil
+}
+
// chrMode returns the mode value for creating a character device node with
// the given permission bits.
func chrMode(mode os.FileMode) uint32 {
	return uint32(unix.S_IFCHR | mode)
}

// blkMode returns the mode value for creating a block device node with the
// given permission bits.
func blkMode(mode os.FileMode) uint32 {
	return uint32(unix.S_IFBLK | mode)
}

// mkdev packs a major and minor device number into a single device ID.
func mkdev(major, minor uint32) uint64 {
	return unix.Mkdev(major, minor)
}

// mkfifo creates a named pipe at path with the given mode.
func mkfifo(path string, mode uint32) error {
	return unix.Mkfifo(path, mode)
}

// mknod creates a filesystem node at path with the given mode and device ID.
func mknod(path string, mode uint32, dev int) error {
	return unix.Mknod(path, mode, dev)
}

// chmod sets the permission bits on path, following symlinks.
func chmod(path string, mode os.FileMode) error {
	return os.Chmod(path, mode)
}

// chown sets the ownership of path, following symlinks.
func chown(path string, uid, gid int) error {
	return os.Chown(path, uid, gid)
}

// lchown sets the ownership of path without following symlinks.
func lchown(path string, uid, gid int) error {
	return os.Lchown(path, uid, gid)
}
+
+func lutimes(isSymlink bool, path string, atime, mtime time.Time) error {
+ if atime.IsZero() || mtime.IsZero() {
+ now := time.Now()
+ if atime.IsZero() {
+ atime = now
+ }
+ if mtime.IsZero() {
+ mtime = now
+ }
+ }
+ return unix.Lutimes(path, []unix.Timeval{unix.NsecToTimeval(atime.UnixNano()), unix.NsecToTimeval(mtime.UnixNano())})
+}
+
+const (
+ testModeMask = int64(os.ModePerm)
+ testIgnoreSymlinkDates = false
+)
diff --git a/vendor/github.com/containers/buildah/copier/syscall_windows.go b/vendor/github.com/containers/buildah/copier/syscall_windows.go
new file mode 100644
index 000000000..be50d473d
--- /dev/null
+++ b/vendor/github.com/containers/buildah/copier/syscall_windows.go
@@ -0,0 +1,83 @@
+// +build windows
+
+package copier
+
+import (
+ "errors"
+ "os"
+ "syscall"
+ "time"
+
+ "golang.org/x/sys/windows"
+)
+
// canChroot is false on Windows, which has no chroot(2) equivalent.
var canChroot = false

// chroot is a no-op on Windows; it always reports that no chroot happened.
func chroot(path string) (bool, error) {
	return false, nil
}

// chrMode returns the mode value for a character device with the given
// permission bits.
func chrMode(mode os.FileMode) uint32 {
	return windows.S_IFCHR | uint32(mode)
}

// blkMode returns the mode value for a block device with the given
// permission bits.
func blkMode(mode os.FileMode) uint32 {
	return windows.S_IFBLK | uint32(mode)
}

// mkdev is a stub on Windows, where unix-style device numbers are not used.
func mkdev(major, minor uint32) uint64 {
	return 0
}

// mkfifo is unsupported on Windows and always returns ENOSYS.
func mkfifo(path string, mode uint32) error {
	return syscall.ENOSYS
}

// mknod is unsupported on Windows and always returns ENOSYS.
func mknod(path string, mode uint32, dev int) error {
	return syscall.ENOSYS
}
+
+func chmod(path string, mode os.FileMode) error {
+ err := os.Chmod(path, mode)
+ if err != nil && errors.Is(err, syscall.EWINDOWS) {
+ return nil
+ }
+ return err
+}
+
+func chown(path string, uid, gid int) error {
+ err := os.Chown(path, uid, gid)
+ if err != nil && errors.Is(err, syscall.EWINDOWS) {
+ return nil
+ }
+ return err
+}
+
+func lchown(path string, uid, gid int) error {
+ err := os.Lchown(path, uid, gid)
+ if err != nil && errors.Is(err, syscall.EWINDOWS) {
+ return nil
+ }
+ return err
+}
+
+func lutimes(isSymlink bool, path string, atime, mtime time.Time) error {
+ if isSymlink {
+ return nil
+ }
+ if atime.IsZero() || mtime.IsZero() {
+ now := time.Now()
+ if atime.IsZero() {
+ atime = now
+ }
+ if mtime.IsZero() {
+ mtime = now
+ }
+ }
+ return windows.UtimesNano(path, []windows.Timespec{windows.NsecToTimespec(atime.UnixNano()), windows.NsecToTimespec(mtime.UnixNano())})
+}
+
+const (
+ testModeMask = int64(0600)
+ testIgnoreSymlinkDates = true
+)
diff --git a/vendor/github.com/containers/buildah/copier/unwrap_112.go b/vendor/github.com/containers/buildah/copier/unwrap_112.go
new file mode 100644
index 000000000..ebbad08b9
--- /dev/null
+++ b/vendor/github.com/containers/buildah/copier/unwrap_112.go
@@ -0,0 +1,11 @@
+// +build !go113
+
+package copier
+
+import (
+ "github.com/pkg/errors"
+)
+
// unwrapError returns the underlying cause of err, using the pkg/errors
// Causer convention (fallback used when building without the go113 tag).
func unwrapError(err error) error {
	return errors.Cause(err)
}
diff --git a/vendor/github.com/containers/buildah/copier/unwrap_113.go b/vendor/github.com/containers/buildah/copier/unwrap_113.go
new file mode 100644
index 000000000..cd0d0fb68
--- /dev/null
+++ b/vendor/github.com/containers/buildah/copier/unwrap_113.go
@@ -0,0 +1,18 @@
+// +build go113
+
+package copier
+
+import (
+ stderror "errors"
+
+ "github.com/pkg/errors"
+)
+
// unwrapError peels back every layer of wrapping and returns the innermost
// error: it first takes the pkg/errors Cause of err, then repeatedly calls
// Unwrap until no further wrapped error remains.
func unwrapError(err error) error {
	e := errors.Cause(err)
	for e != nil {
		err = e
		e = errors.Unwrap(err)
	}
	return err
}
diff --git a/vendor/github.com/containers/buildah/copier/xattrs.go b/vendor/github.com/containers/buildah/copier/xattrs.go
new file mode 100644
index 000000000..71769989c
--- /dev/null
+++ b/vendor/github.com/containers/buildah/copier/xattrs.go
@@ -0,0 +1,92 @@
+// +build linux netbsd freebsd darwin
+
+package copier
+
+import (
+ "path/filepath"
+ "strings"
+ "syscall"
+
+ "github.com/pkg/errors"
+ "golang.org/x/sys/unix"
+)
+
const (
	// xattrsSupported is true on platforms where we read and write
	// extended attributes.
	xattrsSupported = true
)

// relevantAttributes lists glob patterns for the extended attributes that we
// preserve when copying; anything that doesn't match is discarded.
var relevantAttributes = []string{"security.capability", "security.ima", "user.*"}

// isRelevantXattr checks if "attribute" matches one of the attribute patterns
// listed in the "relevantAttributes" list.
func isRelevantXattr(attribute string) bool {
	for _, pattern := range relevantAttributes {
		if ok, err := filepath.Match(pattern, attribute); err == nil && ok {
			return true
		}
	}
	return false
}
+
+// Lgetxattrs returns a map of the relevant extended attributes set on the given file.
+func Lgetxattrs(path string) (map[string]string, error) {
+ maxSize := 64 * 1024 * 1024
+ listSize := 64 * 1024
+ var list []byte
+ for listSize < maxSize {
+ list = make([]byte, listSize)
+ size, err := unix.Llistxattr(path, list)
+ if err != nil {
+ if unwrapError(err) == syscall.ERANGE {
+ listSize *= 2
+ continue
+ }
+ return nil, errors.Wrapf(err, "error listing extended attributes of %q", path)
+ }
+ list = list[:size]
+ break
+ }
+ if listSize >= maxSize {
+ return nil, errors.Errorf("unable to read list of attributes for %q: size would have been too big", path)
+ }
+ m := make(map[string]string)
+ for _, attribute := range strings.Split(string(list), string('\000')) {
+ if isRelevantXattr(attribute) {
+ attributeSize := 64 * 1024
+ var attributeValue []byte
+ for attributeSize < maxSize {
+ attributeValue = make([]byte, attributeSize)
+ size, err := unix.Lgetxattr(path, attribute, attributeValue)
+ if err != nil {
+ if unwrapError(err) == syscall.ERANGE {
+ attributeSize *= 2
+ continue
+ }
+ return nil, errors.Wrapf(err, "error getting value of extended attribute %q on %q", attribute, path)
+ }
+ m[attribute] = string(attributeValue[:size])
+ break
+ }
+ if attributeSize >= maxSize {
+ return nil, errors.Errorf("unable to read attribute %q of %q: size would have been too big", attribute, path)
+ }
+ }
+ }
+ return m, nil
+}
+
+// Lsetxattrs sets the relevant members of the specified extended attributes on the given file.
+func Lsetxattrs(path string, xattrs map[string]string) error {
+ for attribute, value := range xattrs {
+ if isRelevantXattr(attribute) {
+ if err := unix.Lsetxattr(path, attribute, []byte(value), 0); err != nil {
+ return errors.Wrapf(err, "error setting value of extended attribute %q on %q", attribute, path)
+ }
+ }
+ }
+ return nil
+}
diff --git a/vendor/github.com/containers/buildah/copier/xattrs_unsupported.go b/vendor/github.com/containers/buildah/copier/xattrs_unsupported.go
new file mode 100644
index 000000000..750d842f8
--- /dev/null
+++ b/vendor/github.com/containers/buildah/copier/xattrs_unsupported.go
@@ -0,0 +1,15 @@
+// +build !linux,!netbsd,!freebsd,!darwin
+
+package copier
+
const (
	// xattrsSupported is false on platforms where we don't handle
	// extended attributes.
	xattrsSupported = false
)

// Lgetxattrs is a no-op on platforms without extended attribute support.
func Lgetxattrs(path string) (map[string]string, error) {
	return nil, nil
}

// Lsetxattrs is a no-op on platforms without extended attribute support.
func Lsetxattrs(path string, xattrs map[string]string) error {
	return nil
}
diff --git a/vendor/github.com/containers/buildah/digester.go b/vendor/github.com/containers/buildah/digester.go
index ff1bef2f5..870ab8d98 100644
--- a/vendor/github.com/containers/buildah/digester.go
+++ b/vendor/github.com/containers/buildah/digester.go
@@ -6,6 +6,7 @@ import (
"hash"
"io"
"sync"
+ "time"
digest "github.com/opencontainers/go-digest"
"github.com/pkg/errors"
@@ -82,6 +83,10 @@ func (t *tarFilterer) Close() error {
// newTarFilterer passes one or more tar archives through to an io.WriteCloser
// as a single archive, potentially calling filter to modify headers and
// contents as it goes.
+//
+// Note: if "filter" indicates that a given item should be skipped, there is no
+// guarantee that there will not be a subsequent item of type TypeLink, which
+// is a hard link, which points to the skipped item as the link target.
func newTarFilterer(writeCloser io.WriteCloser, filter func(hdr *tar.Header) (skip, replaceContents bool, replacementContents io.Reader)) io.WriteCloser {
pipeReader, pipeWriter := io.Pipe()
tarWriter := tar.NewWriter(writeCloser)
@@ -153,12 +158,20 @@ type tarDigester struct {
tarFilterer io.WriteCloser
}
+func modifyTarHeaderForDigesting(hdr *tar.Header) (skip, replaceContents bool, replacementContents io.Reader) {
+ zeroTime := time.Time{}
+ hdr.ModTime = zeroTime
+ hdr.AccessTime = zeroTime
+ hdr.ChangeTime = zeroTime
+ return false, false, nil
+}
+
// newTarDigester returns a digester that hashes a tar stream after routing
// it through a filter which zeroes per-entry timestamps, so the resulting
// digest is independent of file times.
func newTarDigester(contentType string) digester {
	nested := newSimpleDigester(contentType)
	digester := &tarDigester{
		isOpen: true,
		nested: nested,
		// Filter headers through modifyTarHeaderForDigesting before
		// they reach the nested digester.
		tarFilterer: newTarFilterer(nested, modifyTarHeaderForDigesting),
	}
	return digester
}
diff --git a/vendor/github.com/containers/buildah/go.mod b/vendor/github.com/containers/buildah/go.mod
index 9e692546b..fac079e45 100644
--- a/vendor/github.com/containers/buildah/go.mod
+++ b/vendor/github.com/containers/buildah/go.mod
@@ -4,11 +4,10 @@ go 1.12
require (
github.com/containernetworking/cni v0.7.2-0.20190904153231-83439463f784
- github.com/containers/common v0.19.0
- github.com/containers/image/v5 v5.5.1
+ github.com/containers/common v0.21.0
+ github.com/containers/image/v5 v5.5.2
github.com/containers/ocicrypt v1.0.3
- github.com/containers/storage v1.23.0
- github.com/cyphar/filepath-securejoin v0.2.2
+ github.com/containers/storage v1.23.3
github.com/docker/distribution v2.7.1+incompatible
github.com/docker/go-units v0.4.0
github.com/docker/libnetwork v0.8.0-dev.2.0.20190625141545-5a177b73e316
@@ -27,8 +26,7 @@ require (
github.com/opencontainers/selinux v1.6.0
github.com/openshift/imagebuilder v1.1.6
github.com/pkg/errors v0.9.1
- github.com/seccomp/containers-golang v0.6.0
- github.com/seccomp/libseccomp-golang v0.9.1
+ github.com/seccomp/libseccomp-golang v0.9.2-0.20200616122406-847368b35ebf
github.com/sirupsen/logrus v1.6.0
github.com/spf13/cobra v0.0.7
github.com/spf13/pflag v1.0.5
diff --git a/vendor/github.com/containers/buildah/go.sum b/vendor/github.com/containers/buildah/go.sum
index e7d10f739..463f2bdcc 100644
--- a/vendor/github.com/containers/buildah/go.sum
+++ b/vendor/github.com/containers/buildah/go.sum
@@ -52,10 +52,10 @@ github.com/containerd/ttrpc v0.0.0-20190828154514-0e0f228740de/go.mod h1:PvCDdDG
github.com/containerd/typeurl v0.0.0-20180627222232-a93fcdb778cd/go.mod h1:Cm3kwCdlkCfMSHURc+r6fwoGH6/F1hH3S4sg0rLFWPc=
github.com/containernetworking/cni v0.7.2-0.20190904153231-83439463f784 h1:rqUVLD8I859xRgUx/WMC3v7QAFqbLKZbs+0kqYboRJc=
github.com/containernetworking/cni v0.7.2-0.20190904153231-83439463f784/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY=
-github.com/containers/common v0.19.0 h1:nya/Fh51kiyV0cAO31ejoNwvRAeYreymsO820yjfc3Y=
-github.com/containers/common v0.19.0/go.mod h1:+NUHV8V5Kmo260ja9Dxtr8ialrDnK4RNzyeEbSgmLac=
-github.com/containers/image/v5 v5.5.1 h1:h1FCOXH6Ux9/p/E4rndsQOC4yAdRU0msRTfLVeQ7FDQ=
-github.com/containers/image/v5 v5.5.1/go.mod h1:4PyNYR0nwlGq/ybVJD9hWlhmIsNra4Q8uOQX2s6E2uM=
+github.com/containers/common v0.21.0 h1:v2U9MrGw0vMgefQf0/uJYBsSnengxLbSORYqhCVEBs0=
+github.com/containers/common v0.21.0/go.mod h1:8w8SVwc+P2p1MOnRMbSKNWXt1Iwd2bKFu2LLZx55DTM=
+github.com/containers/image/v5 v5.5.2 h1:fv7FArz0zUnjH0W0l8t90CqWFlFcQrPP6Pug+9dUtVI=
+github.com/containers/image/v5 v5.5.2/go.mod h1:4PyNYR0nwlGq/ybVJD9hWlhmIsNra4Q8uOQX2s6E2uM=
github.com/containers/libtrust v0.0.0-20190913040956-14b96171aa3b h1:Q8ePgVfHDplZ7U33NwHZkrVELsZP5fYj9pM5WBZB2GE=
github.com/containers/libtrust v0.0.0-20190913040956-14b96171aa3b/go.mod h1:9rfv8iPl1ZP7aqh9YA68wnZv2NUDbXdcdPHVz0pFbPY=
github.com/containers/ocicrypt v1.0.2 h1:Q0/IPs8ohfbXNxEfyJ2pFVmvJu5BhqJUAmc6ES9NKbo=
@@ -64,8 +64,8 @@ github.com/containers/ocicrypt v1.0.3 h1:vYgl+RZ9Q3DPMuTfxmN+qp0X2Bj52uuY2vnt6Gz
github.com/containers/ocicrypt v1.0.3/go.mod h1:CUBa+8MRNL/VkpxYIpaMtgn1WgXGyvPQj8jcy0EVG6g=
github.com/containers/storage v1.20.2 h1:tw/uKRPDnmVrluIzer3dawTFG/bTJLP8IEUyHFhltYk=
github.com/containers/storage v1.20.2/go.mod h1:oOB9Ie8OVPojvoaKWEGSEtHbXUAs+tSyr7RO7ZGteMc=
-github.com/containers/storage v1.23.0 h1:gYyNkBiihC2FvGiHOjOjpnfojYwgxpLVooTUlmD6pxs=
-github.com/containers/storage v1.23.0/go.mod h1:I1EIAA7B4OwWRSA0b4yq2AW1wjvvfcY0zLWQuwTa4zw=
+github.com/containers/storage v1.23.3 h1:6ZeQi+xKBXrbUXSSZvSs8HuKoNCPfRkXR4f+8TkiMsI=
+github.com/containers/storage v1.23.3/go.mod h1:0azTMiuBhArp/VUmH1o4DJAGaaH+qLtEu17pJ/iKJCg=
github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
@@ -185,8 +185,8 @@ github.com/klauspost/compress v1.10.7 h1:7rix8v8GpI3ZBb0nSozFRgbtXKv+hOe+qfEpZqy
github.com/klauspost/compress v1.10.7/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
github.com/klauspost/compress v1.10.8 h1:eLeJ3dr/Y9+XRfJT4l+8ZjmtB5RPJhucH2HeCV5+IZY=
github.com/klauspost/compress v1.10.8/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
-github.com/klauspost/compress v1.10.10 h1:a/y8CglcM7gLGYmlbP/stPE5sR3hbhFRUjCBfd/0B3I=
-github.com/klauspost/compress v1.10.10/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
+github.com/klauspost/compress v1.10.11 h1:K9z59aO18Aywg2b/WSgBaUX99mHy2BES18Cr5lBKZHk=
+github.com/klauspost/compress v1.10.11/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
github.com/klauspost/pgzip v1.2.4 h1:TQ7CNpYKovDOmqzRHKxJh0BeaBI7UdQZYc6p7pMQh1A=
github.com/klauspost/pgzip v1.2.4/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk=
@@ -304,10 +304,10 @@ github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40T
github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
github.com/russross/blackfriday/v2 v2.0.1 h1:lPqVAte+HuHNfhJ/0LC98ESWRz8afy9tM/0RK8m9o+Q=
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
-github.com/seccomp/containers-golang v0.6.0 h1:VWPMMIDr8pAtNjCX0WvLEEK9EQi5lAm4HtJbDtAtFvQ=
-github.com/seccomp/containers-golang v0.6.0/go.mod h1:Dd9mONHvW4YdbSzdm23yf2CFw0iqvqLhO0mEFvPIvm4=
github.com/seccomp/libseccomp-golang v0.9.1 h1:NJjM5DNFOs0s3kYE1WUOr6G8V97sdt46rlXTMfXGWBo=
github.com/seccomp/libseccomp-golang v0.9.1/go.mod h1:GbW5+tmTXfcxTToHLXlScSlAvWlF4P2Ca7zGrPiEpWo=
+github.com/seccomp/libseccomp-golang v0.9.2-0.20200616122406-847368b35ebf h1:b0+ZBD3rohnkQ4q5duD1+RyTXTg9yk+qTOPMSQtapO0=
+github.com/seccomp/libseccomp-golang v0.9.2-0.20200616122406-847368b35ebf/go.mod h1:JA8cRccbGaA1s33RQf7Y1+q9gHmZX1yB/z9WDN1C6fg=
github.com/shurcooL/sanitized_anchor_name v1.0.0 h1:PdmoCO6wvbs+7yrJyMORt4/BmY5IYyJwS/kOiWx8mHo=
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4=
@@ -435,7 +435,6 @@ golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20200327173247-9dae0f8f5775/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200519105757-fe76b779f299 h1:DYfZAGf2WMFjMxbgTjaC+2HC7NkNAQs+6Q8b9WEB/F4=
golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200720211630-cb9d2d5c5666/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200728102440-3e129f6d46b1 h1:sIky/MyNRSHTrdxfsiUSS4WIAMvInbeXljJz+jDjeYE=
golang.org/x/sys v0.0.0-20200728102440-3e129f6d46b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
diff --git a/vendor/github.com/containers/buildah/image.go b/vendor/github.com/containers/buildah/image.go
index 8ca94924a..b2c95fecd 100644
--- a/vendor/github.com/containers/buildah/image.go
+++ b/vendor/github.com/containers/buildah/image.go
@@ -13,6 +13,7 @@ import (
"strings"
"time"
+ "github.com/containers/buildah/copier"
"github.com/containers/buildah/docker"
"github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/image"
@@ -21,6 +22,7 @@ import (
"github.com/containers/image/v5/types"
"github.com/containers/storage"
"github.com/containers/storage/pkg/archive"
+ "github.com/containers/storage/pkg/idtools"
"github.com/containers/storage/pkg/ioutils"
digest "github.com/opencontainers/go-digest"
specs "github.com/opencontainers/image-spec/specs-go"
@@ -50,7 +52,7 @@ type containerImageRef struct {
layerID string
oconfig []byte
dconfig []byte
- created time.Time
+ created *time.Time
createdBy string
historyComment string
annotations map[string]string
@@ -58,7 +60,7 @@ type containerImageRef struct {
exporting bool
squash bool
emptyLayer bool
- tarPath func(path string) (io.ReadCloser, error)
+ idMappingOptions *IDMappingOptions
parent string
blobDirectory string
preEmptyLayers []v1.History
@@ -142,16 +144,25 @@ func computeLayerMIMEType(what string, layerCompression archive.Compression) (om
// Extract the container's whole filesystem as if it were a single layer.
func (i *containerImageRef) extractRootfs() (io.ReadCloser, error) {
+ var uidMap, gidMap []idtools.IDMap
mountPoint, err := i.store.Mount(i.containerID, i.mountLabel)
if err != nil {
return nil, errors.Wrapf(err, "error mounting container %q", i.containerID)
}
- rc, err := i.tarPath(mountPoint)
- if err != nil {
- return nil, errors.Wrapf(err, "error extracting rootfs from container %q", i.containerID)
- }
- return ioutils.NewReadCloserWrapper(rc, func() error {
- if err = rc.Close(); err != nil {
+ pipeReader, pipeWriter := io.Pipe()
+ go func() {
+ if i.idMappingOptions != nil {
+ uidMap, gidMap = convertRuntimeIDMaps(i.idMappingOptions.UIDMap, i.idMappingOptions.GIDMap)
+ }
+ copierOptions := copier.GetOptions{
+ UIDMap: uidMap,
+ GIDMap: gidMap,
+ }
+ err = copier.Get(mountPoint, mountPoint, copierOptions, []string{"."}, pipeWriter)
+ pipeWriter.Close()
+ }()
+ return ioutils.NewReadCloserWrapper(pipeReader, func() error {
+ if err = pipeReader.Close(); err != nil {
err = errors.Wrapf(err, "error closing tar archive of container %q", i.containerID)
}
if _, err2 := i.store.Unmount(i.containerID, false); err == nil {
@@ -167,7 +178,10 @@ func (i *containerImageRef) extractRootfs() (io.ReadCloser, error) {
// Build fresh copies of the container configuration structures so that we can edit them
// without making unintended changes to the original Builder.
func (i *containerImageRef) createConfigsAndManifests() (v1.Image, v1.Manifest, docker.V2Image, docker.V2S2Manifest, error) {
- created := i.created
+ created := time.Now().UTC()
+ if i.created != nil {
+ created = *i.created
+ }
// Build an empty image, and then decode over it.
oimage := v1.Image{}
@@ -285,7 +299,6 @@ func (i *containerImageRef) NewImageSource(ctx context.Context, sc *types.System
if err != nil {
return nil, err
}
- omitTimestamp := i.created.Equal(time.Unix(0, 0))
// Extract each layer and compute its digests, both compressed (if requested) and uncompressed.
for _, layerID := range layers {
@@ -375,9 +388,9 @@ func (i *containerImageRef) NewImageSource(ctx context.Context, sc *types.System
return nil, errors.Wrapf(err, "error compressing %s", what)
}
writer := io.MultiWriter(writeCloser, srcHasher.Hash())
- // Zero out timestamps in the layer, if we're doing that for
+ // Use specified timestamps in the layer, if we're doing that for
// history entries.
- if omitTimestamp {
+ if i.created != nil {
nestedWriteCloser := ioutils.NewWriteCloserWrapper(writer, writeCloser.Close)
writeCloser = newTarFilterer(nestedWriteCloser, func(hdr *tar.Header) (bool, bool, io.Reader) {
// Changing a zeroed field to a non-zero field
@@ -388,13 +401,13 @@ func (i *containerImageRef) NewImageSource(ctx context.Context, sc *types.System
// changing the length) of the header that we
// write.
if !hdr.ModTime.IsZero() {
- hdr.ModTime = i.created
+ hdr.ModTime = *i.created
}
if !hdr.AccessTime.IsZero() {
- hdr.AccessTime = i.created
+ hdr.AccessTime = *i.created
}
if !hdr.ChangeTime.IsZero() {
- hdr.ChangeTime = i.created
+ hdr.ChangeTime = *i.created
}
return false, false, nil
})
@@ -414,7 +427,7 @@ func (i *containerImageRef) NewImageSource(ctx context.Context, sc *types.System
} else {
size = counter.Count
}
- logrus.Debugf("%s size is %d bytes", what, size)
+ logrus.Debugf("%s size is %d bytes, uncompressed digest %s, possibly-compressed digest %s", what, size, srcHasher.Digest().String(), destHasher.Digest().String())
// Rename the layer so that we can more easily find it by digest later.
finalBlobName := filepath.Join(path, destHasher.Digest().String())
if err = os.Rename(filepath.Join(path, "layer"), finalBlobName); err != nil {
@@ -469,8 +482,12 @@ func (i *containerImageRef) NewImageSource(ctx context.Context, sc *types.System
}
}
appendHistory(i.preEmptyLayers)
+ created := time.Now().UTC()
+ if i.created != nil {
+ created = (*i.created).UTC()
+ }
onews := v1.History{
- Created: &i.created,
+ Created: &created,
CreatedBy: i.createdBy,
Author: oimage.Author,
Comment: i.historyComment,
@@ -478,7 +495,7 @@ func (i *containerImageRef) NewImageSource(ctx context.Context, sc *types.System
}
oimage.History = append(oimage.History, onews)
dnews := docker.V2S2History{
- Created: i.created,
+ Created: created,
CreatedBy: i.createdBy,
Author: dimage.Author,
Comment: i.historyComment,
@@ -693,9 +710,10 @@ func (b *Builder) makeImageRef(options CommitOptions, exporting bool) (types.Ima
if err != nil {
return nil, errors.Wrapf(err, "error encoding docker-format image configuration %#v", b.Docker)
}
- created := time.Now().UTC()
+ var created *time.Time
if options.HistoryTimestamp != nil {
- created = options.HistoryTimestamp.UTC()
+ historyTimestampUTC := options.HistoryTimestamp.UTC()
+ created = &historyTimestampUTC
}
createdBy := b.CreatedBy()
if createdBy == "" {
@@ -705,10 +723,6 @@ func (b *Builder) makeImageRef(options CommitOptions, exporting bool) (types.Ima
}
}
- if options.OmitTimestamp {
- created = time.Unix(0, 0).UTC()
- }
-
parent := ""
if b.FromImageID != "" {
parentDigest := digest.NewDigestFromEncoded(digest.Canonical, b.FromImageID)
@@ -735,12 +749,11 @@ func (b *Builder) makeImageRef(options CommitOptions, exporting bool) (types.Ima
exporting: exporting,
squash: options.Squash,
emptyLayer: options.EmptyLayer && !options.Squash,
- tarPath: b.tarPath(&b.IDMappingOptions),
+ idMappingOptions: &b.IDMappingOptions,
parent: parent,
blobDirectory: options.BlobDirectory,
preEmptyLayers: b.PrependedEmptyLayers,
postEmptyLayers: b.AppendedEmptyLayers,
}
-
return ref, nil
}
diff --git a/vendor/github.com/containers/buildah/imagebuildah/build.go b/vendor/github.com/containers/buildah/imagebuildah/build.go
index 185c93ad3..a8ada90d1 100644
--- a/vendor/github.com/containers/buildah/imagebuildah/build.go
+++ b/vendor/github.com/containers/buildah/imagebuildah/build.go
@@ -168,9 +168,9 @@ type BuildOptions struct {
SignBy string
// Architecture specifies the target architecture of the image to be built.
Architecture string
- // OmitTimestamp forces epoch 0 as created timestamp to allow for
- // deterministic, content-addressable builds.
- OmitTimestamp bool
+ // Timestamp sets the created timestamp to the specified time, allowing
+ // for deterministic, content-addressable builds.
+ Timestamp *time.Time
// OS is the specifies the operating system of the image to be built.
OS string
// MaxPullPushRetries is the maximum number of attempts we'll make to pull or push any one
@@ -183,6 +183,8 @@ type BuildOptions struct {
OciDecryptConfig *encconfig.DecryptConfig
// Jobs is the number of stages to run in parallel. If not specified it defaults to 1.
Jobs *int
+ // LogRusage logs resource usage for each step.
+ LogRusage bool
}
// BuildDockerfiles parses a set of one or more Dockerfiles (which may be
diff --git a/vendor/github.com/containers/buildah/imagebuildah/executor.go b/vendor/github.com/containers/buildah/imagebuildah/executor.go
index f3ef584e6..b0ec1cda0 100644
--- a/vendor/github.com/containers/buildah/imagebuildah/executor.go
+++ b/vendor/github.com/containers/buildah/imagebuildah/executor.go
@@ -24,6 +24,7 @@ import (
encconfig "github.com/containers/ocicrypt/config"
"github.com/containers/storage"
"github.com/containers/storage/pkg/archive"
+ digest "github.com/opencontainers/go-digest"
v1 "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/opencontainers/runc/libcontainer/configs"
"github.com/openshift/imagebuilder"
@@ -100,7 +101,7 @@ type Executor struct {
devices []configs.Device
signBy string
architecture string
- omitTimestamp bool
+ timestamp *time.Time
os string
maxPullPushRetries int
retryPullPushDelay time.Duration
@@ -110,6 +111,7 @@ type Executor struct {
stagesLock sync.Mutex
stagesSemaphore *semaphore.Weighted
jobs int
+ logRusage bool
}
// NewExecutor creates a new instance of the imagebuilder.Executor interface.
@@ -152,6 +154,11 @@ func NewExecutor(store storage.Store, options BuildOptions, mainNode *parser.Nod
jobs = *options.Jobs
}
+ writer := options.ReportWriter
+ if options.Quiet {
+ writer = ioutil.Discard
+ }
+
exec := Executor{
stages: make(map[string]*StageExecutor),
store: store,
@@ -174,7 +181,7 @@ func NewExecutor(store storage.Store, options BuildOptions, mainNode *parser.Nod
in: options.In,
out: options.Out,
err: options.Err,
- reportWriter: options.ReportWriter,
+ reportWriter: writer,
isolation: options.Isolation,
namespaceOptions: options.NamespaceOptions,
configureNetwork: options.ConfigureNetwork,
@@ -201,13 +208,14 @@ func NewExecutor(store storage.Store, options BuildOptions, mainNode *parser.Nod
devices: devices,
signBy: options.SignBy,
architecture: options.Architecture,
- omitTimestamp: options.OmitTimestamp,
+ timestamp: options.Timestamp,
os: options.OS,
maxPullPushRetries: options.MaxPullPushRetries,
retryPullPushDelay: options.PullPushRetryDelay,
ociDecryptConfig: options.OciDecryptConfig,
terminatedStage: make(map[string]struct{}),
jobs: jobs,
+ logRusage: options.LogRusage,
}
if exec.err == nil {
exec.err = os.Stderr
@@ -328,22 +336,22 @@ func (b *Executor) waitForStage(ctx context.Context, name string, stages imagebu
}
}
-// getImageHistory returns the history of imageID.
-func (b *Executor) getImageHistory(ctx context.Context, imageID string) ([]v1.History, error) {
+// getImageHistoryAndDiffIDs returns the history and diff IDs list of imageID.
+func (b *Executor) getImageHistoryAndDiffIDs(ctx context.Context, imageID string) ([]v1.History, []digest.Digest, error) {
imageRef, err := is.Transport.ParseStoreReference(b.store, "@"+imageID)
if err != nil {
- return nil, errors.Wrapf(err, "error getting image reference %q", imageID)
+ return nil, nil, errors.Wrapf(err, "error getting image reference %q", imageID)
}
ref, err := imageRef.NewImage(ctx, nil)
if err != nil {
- return nil, errors.Wrapf(err, "error creating new image from reference to image %q", imageID)
+ return nil, nil, errors.Wrapf(err, "error creating new image from reference to image %q", imageID)
}
defer ref.Close()
oci, err := ref.OCIConfig(ctx)
if err != nil {
- return nil, errors.Wrapf(err, "error getting possibly-converted OCI config of image %q", imageID)
+ return nil, nil, errors.Wrapf(err, "error getting possibly-converted OCI config of image %q", imageID)
}
- return oci.History, nil
+ return oci.History, oci.RootFS.DiffIDs, nil
}
func (b *Executor) buildStage(ctx context.Context, cleanupStages map[int]*StageExecutor, stages imagebuilder.Stages, stageIndex int) (imageID string, ref reference.Canonical, err error) {
diff --git a/vendor/github.com/containers/buildah/imagebuildah/stage_executor.go b/vendor/github.com/containers/buildah/imagebuildah/stage_executor.go
index f9cf2312a..0b1db01a3 100644
--- a/vendor/github.com/containers/buildah/imagebuildah/stage_executor.go
+++ b/vendor/github.com/containers/buildah/imagebuildah/stage_executor.go
@@ -12,8 +12,9 @@ import (
"time"
"github.com/containers/buildah"
+ "github.com/containers/buildah/copier"
buildahdocker "github.com/containers/buildah/docker"
- "github.com/containers/buildah/pkg/chrootuser"
+ "github.com/containers/buildah/pkg/rusage"
"github.com/containers/buildah/util"
cp "github.com/containers/image/v5/copy"
"github.com/containers/image/v5/docker/reference"
@@ -23,8 +24,8 @@ import (
"github.com/containers/image/v5/types"
"github.com/containers/storage"
"github.com/containers/storage/pkg/archive"
- securejoin "github.com/cyphar/filepath-securejoin"
docker "github.com/fsouza/go-dockerclient"
+ digest "github.com/opencontainers/go-digest"
v1 "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/openshift/imagebuilder"
"github.com/openshift/imagebuilder/dockerfile/parser"
@@ -55,7 +56,6 @@ type StageExecutor struct {
volumeCache map[string]string
volumeCacheInfo map[string]os.FileInfo
mountPoint string
- copyFrom string // Used to keep track of the --from flag from COPY and ADD
output string
containerIDs []string
stage *imagebuilder.Stage
@@ -258,166 +258,11 @@ func (s *StageExecutor) volumeCacheRestore() error {
return nil
}
-// digestSpecifiedContent digests any content that this next instruction would add to
-// the image, returning the digester if there is any, or nil otherwise. We
-// don't care about the details of where in the filesystem the content actually
-// goes, because we're not actually going to add it here, so this is less
-// involved than Copy().
-func (s *StageExecutor) digestSpecifiedContent(ctx context.Context, node *parser.Node, argValues []string, envValues []string) (string, error) {
- // No instruction: done.
- if node == nil {
- return "", nil
- }
-
- // Not adding content: done.
- switch strings.ToUpper(node.Value) {
- default:
- return "", nil
- case "ADD", "COPY":
- }
-
- // Pull out everything except the first node (the instruction) and the
- // last node (the destination).
- var srcs []string
- destination := node
- for destination.Next != nil {
- destination = destination.Next
- if destination.Next != nil {
- srcs = append(srcs, destination.Value)
- }
- }
-
- var sources []string
- var idMappingOptions *buildah.IDMappingOptions
- contextDir := s.executor.contextDir
- for _, flag := range node.Flags {
- if strings.HasPrefix(flag, "--from=") {
- // Flag says to read the content from another
- // container. Update the ID mappings and
- // all-content-comes-from-below-this-directory value.
- from := strings.TrimPrefix(flag, "--from=")
-
- // If from has an argument within it, resolve it to its
- // value. Otherwise just return the value found.
- var fromErr error
- from, fromErr = imagebuilder.ProcessWord(from, s.stage.Builder.Arguments())
- if fromErr != nil {
- return "", errors.Wrapf(fromErr, "unable to resolve argument %q", from)
- }
- if isStage, err := s.executor.waitForStage(ctx, from, s.stages[:s.index]); isStage && err != nil {
- return "", err
- }
- if other, ok := s.executor.stages[from]; ok && other.index < s.index {
- contextDir = other.mountPoint
- idMappingOptions = &other.builder.IDMappingOptions
- } else if builder, ok := s.executor.containerMap[from]; ok {
- contextDir = builder.MountPoint
- idMappingOptions = &builder.IDMappingOptions
- } else {
- return "", errors.Errorf("the stage %q has not been built", from)
- }
- }
- }
-
- varValues := append(argValues, envValues...)
- for _, src := range srcs {
- // If src has an argument within it, resolve it to its
- // value. Otherwise just return the value found.
- name, err := imagebuilder.ProcessWord(src, varValues)
- if err != nil {
- return "", errors.Wrapf(err, "unable to resolve source %q", src)
- }
- src = name
- if strings.HasPrefix(src, "http://") || strings.HasPrefix(src, "https://") {
- // Source is a URL. TODO: cache this content
- // somewhere, so that we can avoid pulling it down
- // again if we end up needing to drop it into the
- // filesystem.
- sources = append(sources, src)
- } else {
- // Source is not a URL, so it's a location relative to
- // the all-content-comes-from-below-this-directory
- // directory. Also raise an error if the src escapes
- // the context directory.
- contextSrc, err := securejoin.SecureJoin(contextDir, src)
- if err == nil && strings.HasPrefix(src, "../") {
- err = errors.New("escaping context directory error")
- }
- if err != nil {
- return "", errors.Wrapf(err, "forbidden path for %q, it is outside of the build context %q", src, contextDir)
- }
- sources = append(sources, contextSrc)
- }
- }
- // If the all-content-comes-from-below-this-directory is the build
- // context, read its .dockerignore.
- var excludes []string
- if contextDir == s.executor.contextDir {
- var err error
- if excludes, err = imagebuilder.ParseDockerignore(contextDir); err != nil {
- return "", errors.Wrapf(err, "error parsing .dockerignore in %s", contextDir)
- }
- }
- // Restart the digester and have it do a dry-run copy to compute the
- // digest information.
- options := buildah.AddAndCopyOptions{
- Excludes: excludes,
- ContextDir: contextDir,
- IDMappingOptions: idMappingOptions,
- DryRun: true,
- }
- s.builder.ContentDigester.Restart()
- download := strings.ToUpper(node.Value) == "ADD"
-
- // If destination.Value has an argument within it, resolve it to its
- // value. Otherwise just return the value found.
- destValue, destErr := imagebuilder.ProcessWord(destination.Value, varValues)
- if destErr != nil {
- return "", errors.Wrapf(destErr, "unable to resolve destination %q", destination.Value)
- }
- err := s.builder.Add(destValue, download, options, sources...)
- if err != nil {
- return "", errors.Wrapf(err, "error dry-running %q", node.Original)
- }
- // Return the formatted version of the digester's result.
- contentDigest := ""
- prefix, digest := s.builder.ContentDigester.Digest()
- if prefix != "" {
- prefix += ":"
- }
- if digest.Validate() == nil {
- contentDigest = prefix + digest.Encoded()
- }
- return contentDigest, nil
-}
-
// Copy copies data into the working tree. The "Download" field is how
// imagebuilder tells us the instruction was "ADD" and not "COPY".
func (s *StageExecutor) Copy(excludes []string, copies ...imagebuilder.Copy) error {
s.builder.ContentDigester.Restart()
for _, copy := range copies {
- // Check the file and see if part of it is a symlink.
- // Convert it to the target if so. To be ultrasafe
- // do the same for the mountpoint.
- hadFinalPathSeparator := len(copy.Dest) > 0 && copy.Dest[len(copy.Dest)-1] == os.PathSeparator
- secureMountPoint, err := securejoin.SecureJoin("", s.mountPoint)
- if err != nil {
- return errors.Wrapf(err, "error resolving symlinks for copy destination %s", copy.Dest)
- }
- finalPath, err := securejoin.SecureJoin(secureMountPoint, copy.Dest)
- if err != nil {
- return errors.Wrapf(err, "error resolving symlinks for copy destination %s", copy.Dest)
- }
- if !strings.HasPrefix(finalPath, secureMountPoint) {
- return errors.Wrapf(err, "error resolving copy destination %s", copy.Dest)
- }
- copy.Dest = strings.TrimPrefix(finalPath, secureMountPoint)
- if len(copy.Dest) == 0 || copy.Dest[len(copy.Dest)-1] != os.PathSeparator {
- if hadFinalPathSeparator {
- copy.Dest += string(os.PathSeparator)
- }
- }
-
if copy.Download {
logrus.Debugf("ADD %#v, %#v", excludes, copy)
} else {
@@ -432,12 +277,21 @@ func (s *StageExecutor) Copy(excludes []string, copies ...imagebuilder.Copy) err
// all-content-comes-from-below-this-directory value.
var idMappingOptions *buildah.IDMappingOptions
var copyExcludes []string
+ stripSetuid := false
+ stripSetgid := false
+ preserveOwnership := false
contextDir := s.executor.contextDir
if len(copy.From) > 0 {
- if isStage, err := s.executor.waitForStage(s.ctx, copy.From, s.stages[:s.index]); isStage && err != nil {
+ // If from has an argument within it, resolve it to its
+ // value. Otherwise just return the value found.
+ from, fromErr := imagebuilder.ProcessWord(copy.From, s.stage.Builder.Arguments())
+ if fromErr != nil {
+ return errors.Wrapf(fromErr, "unable to resolve argument %q", copy.From)
+ }
+ if isStage, err := s.executor.waitForStage(s.ctx, from, s.stages[:s.index]); isStage && err != nil {
return err
}
- if other, ok := s.executor.stages[copy.From]; ok && other.index < s.index {
+ if other, ok := s.executor.stages[from]; ok && other.index < s.index {
contextDir = other.mountPoint
idMappingOptions = &other.builder.IDMappingOptions
} else if builder, ok := s.executor.containerMap[copy.From]; ok {
@@ -446,9 +300,12 @@ func (s *StageExecutor) Copy(excludes []string, copies ...imagebuilder.Copy) err
} else {
return errors.Errorf("the stage %q has not been built", copy.From)
}
+ preserveOwnership = true
copyExcludes = excludes
} else {
copyExcludes = append(s.executor.excludes, excludes...)
+			stripSetuid = true // strip setuid bit when copying from the build context (TODO: confirm whether Docker's behavior here changed between 18.06 and 19.03)
+			stripSetgid = true // strip setgid bit likewise (TODO: confirm whether Docker's behavior here changed between 18.06 and 19.03)
}
for _, src := range copy.Src {
if strings.HasPrefix(src, "http://") || strings.HasPrefix(src, "https://") {
@@ -460,53 +317,20 @@ func (s *StageExecutor) Copy(excludes []string, copies ...imagebuilder.Copy) err
return errors.Errorf("source can't be a URL for COPY")
}
} else {
- // Treat the source, which is not a URL, as a
- // location relative to the
- // all-content-comes-from-below-this-directory
- // directory. Also raise an error if the src
- // escapes the context directory.
- srcSecure, err := securejoin.SecureJoin(contextDir, src)
- if err == nil && strings.HasPrefix(src, "../") {
- err = errors.New("escaping context directory error")
- }
- if err != nil {
- return errors.Wrapf(err, "forbidden path for %q, it is outside of the build context %q", src, contextDir)
- }
- if hadFinalPathSeparator {
- // If destination is a folder, we need to take extra care to
- // ensure that files are copied with correct names (since
- // resolving a symlink may result in a different name).
- _, srcName := filepath.Split(src)
- _, srcNameSecure := filepath.Split(srcSecure)
- if srcName != srcNameSecure {
- options := buildah.AddAndCopyOptions{
- Chown: copy.Chown,
- ContextDir: contextDir,
- Excludes: copyExcludes,
- IDMappingOptions: idMappingOptions,
- }
- // If we've a tar file, it will create a directory using the name of the tar
- // file if we don't blank it out.
- if strings.HasSuffix(srcName, ".tar") || strings.HasSuffix(srcName, ".gz") {
- srcName = ""
- }
- if err := s.builder.Add(filepath.Join(copy.Dest, srcName), copy.Download, options, srcSecure); err != nil {
- return err
- }
- continue
- }
- }
- sources = append(sources, srcSecure)
+ sources = append(sources, filepath.Join(contextDir, src))
}
}
options := buildah.AddAndCopyOptions{
- Chown: copy.Chown,
- ContextDir: contextDir,
- Excludes: copyExcludes,
- IDMappingOptions: idMappingOptions,
+ Chown: copy.Chown,
+ PreserveOwnership: preserveOwnership,
+ ContextDir: contextDir,
+ Excludes: copyExcludes,
+ IDMappingOptions: idMappingOptions,
+ StripSetuidBit: stripSetuid,
+ StripSetgidBit: stripSetgid,
}
if err := s.builder.Add(copy.Dest, copy.Download, options, sources...); err != nil {
- return err
+ return errors.Wrapf(err, "error adding sources %v", sources)
}
}
return nil
@@ -767,6 +591,7 @@ func (s *StageExecutor) getImageRootfs(ctx context.Context, image string) (mount
// Execute runs each of the steps in the stage's parsed tree, in turn.
func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string, ref reference.Canonical, err error) {
+ var resourceUsage rusage.Rusage
stage := s.stage
ib := stage.Builder
checkForLayers := s.executor.layers && s.executor.useCache
@@ -789,6 +614,30 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string,
}
s.executor.stagesLock.Unlock()
+ // Set things up so that we can log resource usage as we go.
+ logRusage := func() {
+ if rusage.Supported() {
+ usage, err := rusage.Get()
+ if err != nil {
+ fmt.Fprintf(s.executor.out, "error gathering resource usage information: %v\n", err)
+ return
+ }
+ if !s.executor.quiet && s.executor.logRusage {
+ fmt.Fprintf(s.executor.out, "%s\n", rusage.FormatDiff(usage.Subtract(resourceUsage)))
+ }
+ resourceUsage = usage
+ }
+ }
+
+ // Start counting resource usage before we potentially pull a base image.
+ if rusage.Supported() {
+ if resourceUsage, err = rusage.Get(); err != nil {
+ return "", nil, err
+ }
+ // Log the final incremental resource usage counter before we return.
+ defer logRusage()
+ }
+
// Create the (first) working container for this stage. Reinitializing
// the imagebuilder configuration may alter the list of steps we have,
// so take a snapshot of them *after* that.
@@ -824,7 +673,6 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string,
imgID = imgID[0:11]
}
if s.executor.iidfile == "" {
-
fmt.Fprintf(s.executor.out, "--> %s\n", imgID)
}
}
@@ -859,6 +707,7 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string,
}
for i, node := range children {
+ logRusage()
moreInstructions := i < len(children)-1
lastInstruction := !moreInstructions
// Resolve any arguments in this instruction.
@@ -871,11 +720,8 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string,
s.executor.log("%s", step.Original)
}
- // Check if there's a --from if the step command is COPY or
- // ADD. Set copyFrom to point to either the context directory
- // or the root of the container from the specified stage.
+ // Check if there's a --from if the step command is COPY.
// Also check the chown flag for validity.
- s.copyFrom = s.executor.contextDir
for _, flag := range step.Flags {
command := strings.ToUpper(step.Command)
// chown and from flags should have an '=' sign, '--chown=' or '--from='
@@ -886,31 +732,27 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string,
return "", nil, errors.Errorf("ADD only supports the --chown=<uid:gid> flag")
}
if strings.Contains(flag, "--from") && command == "COPY" {
- var mountPoint string
arr := strings.Split(flag, "=")
if len(arr) != 2 {
return "", nil, errors.Errorf("%s: invalid --from flag, should be --from=<name|stage>", command)
}
- // If the source's name corresponds to the
- // result of an earlier stage, wait for that
- // stage to finish being built.
-
// If arr[1] has an argument within it, resolve it to its
// value. Otherwise just return the value found.
- var arr1Err error
- arr[1], arr1Err = imagebuilder.ProcessWord(arr[1], s.stage.Builder.Arguments())
- if arr1Err != nil {
- return "", nil, errors.Wrapf(arr1Err, "unable to resolve argument %q", arr[1])
+ from, fromErr := imagebuilder.ProcessWord(arr[1], s.stage.Builder.Arguments())
+ if fromErr != nil {
+ return "", nil, errors.Wrapf(fromErr, "unable to resolve argument %q", arr[1])
}
- if isStage, err := s.executor.waitForStage(ctx, arr[1], s.stages[:s.index]); isStage && err != nil {
+ // If the source's name corresponds to the
+ // result of an earlier stage, wait for that
+ // stage to finish being built.
+ if isStage, err := s.executor.waitForStage(ctx, from, s.stages[:s.index]); isStage && err != nil {
return "", nil, err
}
- if otherStage, ok := s.executor.stages[arr[1]]; ok && otherStage.index < s.index {
- mountPoint = otherStage.mountPoint
- } else if mountPoint, err = s.getImageRootfs(ctx, arr[1]); err != nil {
- return "", nil, errors.Errorf("%s --from=%s: no stage or image found with that name", command, arr[1])
+ if otherStage, ok := s.executor.stages[from]; ok && otherStage.index < s.index {
+ break
+ } else if _, err = s.getImageRootfs(ctx, from); err != nil {
+ return "", nil, errors.Errorf("%s --from=%s: no stage or image found with that name", command, from)
}
- s.copyFrom = mountPoint
break
}
}
@@ -933,9 +775,14 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string,
return "", nil, errors.Wrapf(err, "error building at STEP \"%s\"", step.Message)
}
// In case we added content, retrieve its digest.
- addedContentDigest, err := s.digestSpecifiedContent(ctx, node, ib.Arguments(), ib.Config().Env)
- if err != nil {
- return "", nil, err
+ addedContentType, addedContentDigest := s.builder.ContentDigester.Digest()
+ addedContentSummary := addedContentType
+ if addedContentDigest != "" {
+ if addedContentSummary != "" {
+ addedContentSummary = addedContentSummary + ":"
+ }
+ addedContentSummary = addedContentSummary + addedContentDigest.Encoded()
+ logrus.Debugf("added content %s", addedContentSummary)
}
if moreInstructions {
// There are still more instructions to process
@@ -943,16 +790,17 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string,
// instruction in the history that we'll write
// for the image when we eventually commit it.
now := time.Now()
- s.builder.AddPrependedEmptyLayer(&now, s.getCreatedBy(node, addedContentDigest), "", "")
+ s.builder.AddPrependedEmptyLayer(&now, s.getCreatedBy(node, addedContentSummary), "", "")
continue
} else {
// This is the last instruction for this stage,
// so we should commit this container to create
- // an image, but only if it's the last one, or
- // if it's used as the basis for a later stage.
+ // an image, but only if it's the last stage,
+ // or if it's used as the basis for a later
+ // stage.
if lastStage || imageIsUsedLater {
logCommit(s.output, i)
- imgID, ref, err = s.commit(ctx, s.getCreatedBy(node, addedContentDigest), false, s.output)
+ imgID, ref, err = s.commit(ctx, s.getCreatedBy(node, addedContentSummary), false, s.output)
if err != nil {
return "", nil, errors.Wrapf(err, "error committing container for step %+v", *step)
}
@@ -966,10 +814,11 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string,
// We're in a multi-layered build.
var (
- commitName string
- cacheID string
- err error
- rebase bool
+ commitName string
+ cacheID string
+ err error
+ rebase bool
+ addedContentSummary string
)
// If we have to commit for this instruction, only assign the
@@ -978,46 +827,47 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string,
commitName = s.output
}
- // If we're using the cache, and we've managed to stick with
- // cached images so far, look for one that matches what we
- // expect to produce for this instruction.
- if checkForLayers && !(s.executor.squash && lastInstruction && lastStage) {
- addedContentDigest, err := s.digestSpecifiedContent(ctx, node, ib.Arguments(), ib.Config().Env)
- if err != nil {
- return "", nil, err
- }
- cacheID, err = s.intermediateImageExists(ctx, node, addedContentDigest)
+ // Check if there's already an image based on our parent that
+ // has the same change that we're about to make, so far as we
+ // can tell.
+ if checkForLayers {
+ cacheID, err = s.intermediateImageExists(ctx, node, addedContentSummary, s.stepRequiresLayer(step))
if err != nil {
return "", nil, errors.Wrap(err, "error checking if cached image exists from a previous build")
}
- if cacheID != "" {
- // Note the cache hit.
- logCacheHit(cacheID)
- } else {
- // We're not going to find any more cache hits.
- checkForLayers = false
- }
}
- if cacheID != "" {
- // A suitable cached image was found, so just reuse it.
- // If we need to name the resulting image because it's
- // the last step in this stage, add the name to the
- // image.
- imgID = cacheID
- if commitName != "" {
- logCommit(commitName, i)
- if imgID, ref, err = s.tagExistingImage(ctx, cacheID, commitName); err != nil {
- return "", nil, err
+ // If we didn't find a cache entry, or we need to add content
+ // to find the digest of the content to check for a cached
+ // image, run the step so that we can check if the result
+ // matches a cache.
+ if cacheID == "" {
+ // Process the instruction directly.
+ if err = ib.Run(step, s, noRunsRemaining); err != nil {
+ logrus.Debugf("%v", errors.Wrapf(err, "error building at step %+v", *step))
+ return "", nil, errors.Wrapf(err, "error building at STEP \"%s\"", step.Message)
+ }
+
+ // In case we added content, retrieve its digest.
+ addedContentType, addedContentDigest := s.builder.ContentDigester.Digest()
+ addedContentSummary = addedContentType
+ if addedContentDigest != "" {
+ if addedContentSummary != "" {
+ addedContentSummary = addedContentSummary + ":"
+ }
+ addedContentSummary = addedContentSummary + addedContentDigest.Encoded()
+ logrus.Debugf("added content %s", addedContentSummary)
+ }
+
+ // Check if there's already an image based on our parent that
+ // has the same change that we just made.
+ if checkForLayers {
+ cacheID, err = s.intermediateImageExists(ctx, node, addedContentSummary, s.stepRequiresLayer(step))
+ if err != nil {
+ return "", nil, errors.Wrap(err, "error checking if cached image exists from a previous build")
}
- logImageID(imgID)
}
- // Update our working container to be based off of the
- // cached image, if we might need to use it as a basis
- // for the next instruction, or if we need the root
- // filesystem to match the image contents for the sake
- // of a later stage that wants to copy content from it.
- rebase = moreInstructions || rootfsIsUsedLater
+ } else {
// If the instruction would affect our configuration,
// process the configuration change so that, if we fall
// off the cache path, the filesystem changes from the
@@ -1031,34 +881,41 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string,
return "", nil, errors.Wrapf(err, "error building at STEP \"%s\"", step.Message)
}
}
- } else {
- // If we didn't find a cached image that we could just reuse,
- // process the instruction directly.
- err := ib.Run(step, s, noRunsRemaining)
- if err != nil {
- logrus.Debugf("%v", errors.Wrapf(err, "error building at step %+v", *step))
- return "", nil, errors.Wrapf(err, "error building at STEP \"%s\"", step.Message)
- }
- // In case we added content, retrieve its digest.
- addedContentDigest, err := s.digestSpecifiedContent(ctx, node, ib.Arguments(), ib.Config().Env)
- if err != nil {
- return "", nil, err
+ }
+
+ if cacheID != "" && !(s.executor.squash && lastInstruction) {
+ logCacheHit(cacheID)
+ // A suitable cached image was found, so we can just
+ // reuse it. If we need to add a name to the resulting
+ // image because it's the last step in this stage, add
+ // the name to the image.
+ imgID = cacheID
+ if commitName != "" {
+ logCommit(commitName, i)
+ if imgID, ref, err = s.tagExistingImage(ctx, cacheID, commitName); err != nil {
+ return "", nil, err
+ }
}
- // Create a new image, maybe with a new layer.
+ } else {
+ // We're not going to find any more cache hits, so we
+ // can stop looking for them.
+ checkForLayers = false
+ // Create a new image, maybe with a new layer, with the
+ // name for this stage if it's the last instruction.
logCommit(s.output, i)
- imgID, ref, err = s.commit(ctx, s.getCreatedBy(node, addedContentDigest), !s.stepRequiresLayer(step), commitName)
+ imgID, ref, err = s.commit(ctx, s.getCreatedBy(node, addedContentSummary), !s.stepRequiresLayer(step), commitName)
if err != nil {
return "", nil, errors.Wrapf(err, "error committing container for step %+v", *step)
}
- logImageID(imgID)
- // We only need to build a new container rootfs
- // using this image if we plan on making
- // further changes to it. Subsequent stages
- // that just want to use the rootfs as a source
- // for COPY or ADD will be content with what we
- // already have.
- rebase = moreInstructions
}
+ logImageID(imgID)
+
+ // Update our working container to be based off of the cached
+ // image, if we might need to use it as a basis for the next
+ // instruction, or if we need the root filesystem to match the
+ // image contents for the sake of a later stage that wants to
+ // copy content from it.
+ rebase = moreInstructions || rootfsIsUsedLater
if rebase {
// Since we either committed the working container or
@@ -1105,29 +962,58 @@ func historyEntriesEqual(base, derived v1.History) bool {
return true
}
-// historyMatches returns true if a candidate history matches the history of our
-// base image (if we have one), plus the current instruction.
+// historyAndDiffIDsMatch returns true if a candidate history matches the
+// history of our base image (if we have one), plus the current instruction,
+// and if the list of diff IDs for the images do for the part of the history
+// that we're comparing.
// Used to verify whether a cache of the intermediate image exists and whether
// to run the build again.
-func (s *StageExecutor) historyMatches(baseHistory []v1.History, child *parser.Node, history []v1.History, addedContentDigest string) bool {
- if len(baseHistory) >= len(history) {
- return false
- }
- if len(history)-len(baseHistory) != 1 {
+func (s *StageExecutor) historyAndDiffIDsMatch(baseHistory []v1.History, baseDiffIDs []digest.Digest, child *parser.Node, history []v1.History, diffIDs []digest.Digest, addedContentSummary string, buildAddsLayer bool) bool {
+ // our history should be as long as the base's, plus one entry for what
+ // we're doing
+ if len(history) != len(baseHistory)+1 {
return false
}
+ // check that each entry in the base history corresponds to an entry in
+ // our history, and count how many of them add a layer diff
+ expectedDiffIDs := 0
for i := range baseHistory {
if !historyEntriesEqual(baseHistory[i], history[i]) {
return false
}
+ if !baseHistory[i].EmptyLayer {
+ expectedDiffIDs++
+ }
+ }
+ if len(baseDiffIDs) != expectedDiffIDs {
+ return false
+ }
+ if buildAddsLayer {
+ // we're adding a layer, so we should have exactly one more
+ // layer than the base image
+ if len(diffIDs) != expectedDiffIDs+1 {
+ return false
+ }
+ } else {
+ // we're not adding a layer, so we should have exactly the same
+ // layers as the base image
+ if len(diffIDs) != expectedDiffIDs {
+ return false
+ }
+ }
+ // compare the diffs for the layers that we should have in common
+ for i := range baseDiffIDs {
+ if diffIDs[i] != baseDiffIDs[i] {
+ return false
+ }
}
- return history[len(baseHistory)].CreatedBy == s.getCreatedBy(child, addedContentDigest)
+ return history[len(baseHistory)].CreatedBy == s.getCreatedBy(child, addedContentSummary)
}
// getCreatedBy returns the command the image at node will be created by. If
// the passed-in CompositeDigester is not nil, it is assumed to have the digest
// information for the content if the node is ADD or COPY.
-func (s *StageExecutor) getCreatedBy(node *parser.Node, addedContentDigest string) string {
+func (s *StageExecutor) getCreatedBy(node *parser.Node, addedContentSummary string) string {
if node == nil {
return "/bin/sh"
}
@@ -1143,7 +1029,7 @@ func (s *StageExecutor) getCreatedBy(node *parser.Node, addedContentDigest strin
for destination.Next != nil {
destination = destination.Next
}
- return "/bin/sh -c #(nop) " + strings.ToUpper(node.Value) + " " + addedContentDigest + " in " + destination.Value + " "
+ return "/bin/sh -c #(nop) " + strings.ToUpper(node.Value) + " " + addedContentSummary + " in " + destination.Value + " "
default:
return "/bin/sh -c #(nop) " + node.Original
}
@@ -1212,40 +1098,54 @@ func (s *StageExecutor) tagExistingImage(ctx context.Context, cacheID, output st
// intermediateImageExists returns true if an intermediate image of currNode exists in the image store from a previous build.
// It verifies this by checking the parent of the top layer of the image and the history.
-func (s *StageExecutor) intermediateImageExists(ctx context.Context, currNode *parser.Node, addedContentDigest string) (string, error) {
+func (s *StageExecutor) intermediateImageExists(ctx context.Context, currNode *parser.Node, addedContentDigest string, buildAddsLayer bool) (string, error) {
// Get the list of images available in the image store
images, err := s.executor.store.Images()
if err != nil {
return "", errors.Wrap(err, "error getting image list from store")
}
var baseHistory []v1.History
+ var baseDiffIDs []digest.Digest
if s.builder.FromImageID != "" {
- baseHistory, err = s.executor.getImageHistory(ctx, s.builder.FromImageID)
+ baseHistory, baseDiffIDs, err = s.executor.getImageHistoryAndDiffIDs(ctx, s.builder.FromImageID)
if err != nil {
return "", errors.Wrapf(err, "error getting history of base image %q", s.builder.FromImageID)
}
}
for _, image := range images {
var imageTopLayer *storage.Layer
+ var imageParentLayerID string
if image.TopLayer != "" {
imageTopLayer, err = s.executor.store.Layer(image.TopLayer)
if err != nil {
return "", errors.Wrapf(err, "error getting top layer info")
}
+ // Figure out which layer from this image we should
+ // compare our container's base layer to.
+ imageParentLayerID = imageTopLayer.ID
+ // If we haven't added a layer here, then our base
+ // layer should be the same as the image's layer. If
+ // we did add a layer, then our base layer should be
+ // the same as the parent of the image's layer.
+ if buildAddsLayer {
+ imageParentLayerID = imageTopLayer.Parent
+ }
}
// If the parent of the top layer of an image is equal to the current build image's top layer,
// it means that this image is potentially a cached intermediate image from a previous
- // build. Next we double check that the history of this image is equivalent to the previous
+ // build.
+ if s.builder.TopLayer != imageParentLayerID {
+ continue
+ }
+ // Next we double check that the history of this image is equivalent to the previous
// lines in the Dockerfile up till the point we are at in the build.
- if imageTopLayer == nil || (s.builder.TopLayer != "" && (imageTopLayer.Parent == s.builder.TopLayer || imageTopLayer.ID == s.builder.TopLayer)) {
- history, err := s.executor.getImageHistory(ctx, image.ID)
- if err != nil {
- return "", errors.Wrapf(err, "error getting history of %q", image.ID)
- }
- // children + currNode is the point of the Dockerfile we are currently at.
- if s.historyMatches(baseHistory, currNode, history, addedContentDigest) {
- return image.ID, nil
- }
+ history, diffIDs, err := s.executor.getImageHistoryAndDiffIDs(ctx, image.ID)
+ if err != nil {
+ return "", errors.Wrapf(err, "error getting history of %q", image.ID)
+ }
+ // children + currNode is the point of the Dockerfile we are currently at.
+ if s.historyAndDiffIDsMatch(baseHistory, baseDiffIDs, currNode, history, diffIDs, addedContentDigest, buildAddsLayer) {
+ return image.ID, nil
}
}
return "", nil
@@ -1355,7 +1255,7 @@ func (s *StageExecutor) commit(ctx context.Context, createdBy string, emptyLayer
SignBy: s.executor.signBy,
MaxRetries: s.executor.maxPullPushRetries,
RetryDelay: s.executor.retryPullPushDelay,
- OmitTimestamp: s.executor.omitTimestamp,
+ HistoryTimestamp: s.executor.timestamp,
}
imgID, _, manifestDigest, err := s.builder.Commit(ctx, imageRef, options)
if err != nil {
@@ -1373,29 +1273,5 @@ func (s *StageExecutor) commit(ctx context.Context, createdBy string, emptyLayer
}
func (s *StageExecutor) EnsureContainerPath(path string) error {
- targetPath, err := securejoin.SecureJoin(s.mountPoint, path)
- if err != nil {
- return errors.Wrapf(err, "error ensuring container path %q", path)
- }
-
- _, err = os.Stat(targetPath)
- if err != nil && os.IsNotExist(err) {
- err = os.MkdirAll(targetPath, 0755)
- if err != nil {
- return errors.Wrapf(err, "error creating directory path %q", targetPath)
- }
- // get the uid and gid so that we can set the correct permissions on the
- // working directory
- uid, gid, _, err := chrootuser.GetUser(s.mountPoint, s.builder.User())
- if err != nil {
- return errors.Wrapf(err, "error getting uid and gid for user %q", s.builder.User())
- }
- if err = os.Chown(targetPath, int(uid), int(gid)); err != nil {
- return errors.Wrapf(err, "error setting ownership on %q", targetPath)
- }
- }
- if err != nil {
- return errors.Wrapf(err, "error ensuring container path %q", path)
- }
- return nil
+ return copier.Mkdir(s.mountPoint, path, copier.MkdirOptions{})
}
diff --git a/vendor/github.com/containers/buildah/pkg/cli/common.go b/vendor/github.com/containers/buildah/pkg/cli/common.go
index c1751bc8c..41d545bd8 100644
--- a/vendor/github.com/containers/buildah/pkg/cli/common.go
+++ b/vendor/github.com/containers/buildah/pkg/cli/common.go
@@ -65,7 +65,7 @@ type BudResults struct {
Logfile string
Loglevel int
NoCache bool
- OmitTimestamp bool
+ Timestamp int64
OS string
Platform string
Pull bool
@@ -82,6 +82,7 @@ type BudResults struct {
Target string
TLSVerify bool
Jobs int
+ LogRusage bool
}
// FromAndBugResults represents the results for common flags
@@ -164,7 +165,7 @@ func GetBudFlags(flags *BudResults) pflag.FlagSet {
fs.BoolVar(&flags.NoCache, "no-cache", false, "Do not use existing cached images for the container build. Build from the start with a new set of cached layers.")
fs.StringVar(&flags.Logfile, "logfile", "", "log to `file` instead of stdout/stderr")
fs.IntVar(&flags.Loglevel, "loglevel", 0, "adjust logging level (range from -2 to 3)")
- fs.BoolVar(&flags.OmitTimestamp, "omit-timestamp", false, "set created timestamp to epoch 0 to allow for deterministic builds")
+ fs.Int64Var(&flags.Timestamp, "timestamp", 0, "set created timestamp to the specified epoch seconds to allow for deterministic builds, defaults to current time")
fs.StringVar(&flags.OS, "os", runtime.GOOS, "set the OS to the provided value instead of the current operating system of the host")
fs.StringVar(&flags.Platform, "platform", parse.DefaultPlatform(), "set the OS/ARCH to the provided value instead of the current operating system and architecture of the host (for example `linux/arm`)")
fs.BoolVar(&flags.Pull, "pull", true, "pull the image from the registry if newer or not present in store, if false, only pull the image if not present")
@@ -181,6 +182,10 @@ func GetBudFlags(flags *BudResults) pflag.FlagSet {
fs.StringVar(&flags.Target, "target", "", "set the target build stage to build")
fs.BoolVar(&flags.TLSVerify, "tls-verify", true, "require HTTPS and verify certificates when accessing the registry")
fs.IntVar(&flags.Jobs, "jobs", 1, "how many stages to run in parallel")
+ fs.BoolVar(&flags.LogRusage, "log-rusage", false, "log resource usage at each build step")
+ if err := fs.MarkHidden("log-rusage"); err != nil {
+ panic(fmt.Sprintf("error marking the log-rusage flag as hidden: %v", err))
+ }
return fs
}
diff --git a/vendor/github.com/containers/buildah/pkg/rusage/rusage.go b/vendor/github.com/containers/buildah/pkg/rusage/rusage.go
new file mode 100644
index 000000000..7b1226d24
--- /dev/null
+++ b/vendor/github.com/containers/buildah/pkg/rusage/rusage.go
@@ -0,0 +1,48 @@
+package rusage
+
+import (
+ "fmt"
+ "time"
+
+ units "github.com/docker/go-units"
+)
+
+// Rusage is a subset of a Unix-style resource usage counter for the current
+// process and its children. On platforms where the getrusage() system call
+// is not available (e.g., Windows), attempting to read the counters yields
+// an error instead.
+type Rusage struct {
+ Date time.Time
+ Elapsed time.Duration
+ Utime, Stime time.Duration
+ Inblock, Outblock int64
+}
+
+// FormatDiff formats the result of rusage.Rusage.Subtract() for logging.
+func FormatDiff(diff Rusage) string {
+ return fmt.Sprintf("%s(system) %s(user) %s(elapsed) %s input %s output", diff.Stime.Round(time.Millisecond), diff.Utime.Round(time.Millisecond), diff.Elapsed.Round(time.Millisecond), units.HumanSize(float64(diff.Inblock*512)), units.HumanSize(float64(diff.Outblock*512)))
+}
+
+// Subtract subtracts the counters in baseline from r and returns the
+// difference, with Elapsed computed from the two Date values. The Date
+// field is zeroed for easier comparison with the zero value for Rusage.
+func (r Rusage) Subtract(baseline Rusage) Rusage {
+ return Rusage{
+ Elapsed: r.Date.Sub(baseline.Date),
+ Utime: r.Utime - baseline.Utime,
+ Stime: r.Stime - baseline.Stime,
+ Inblock: r.Inblock - baseline.Inblock,
+ Outblock: r.Outblock - baseline.Outblock,
+ }
+}
+
+// Get returns the resource usage counters for the current process and
+// its children, or an error if the counters could not be read.
+// The Elapsed field will always be set to zero.
+func Get() (Rusage, error) {
+ counters, err := get()
+ if err != nil {
+ return Rusage{}, err
+ }
+ return counters, nil
+}
diff --git a/vendor/github.com/containers/buildah/pkg/rusage/rusage_unix.go b/vendor/github.com/containers/buildah/pkg/rusage/rusage_unix.go
new file mode 100644
index 000000000..5bfed45c1
--- /dev/null
+++ b/vendor/github.com/containers/buildah/pkg/rusage/rusage_unix.go
@@ -0,0 +1,35 @@
+// +build !windows
+
+package rusage
+
+import (
+ "syscall"
+ "time"
+
+ "github.com/pkg/errors"
+)
+
+func mkduration(tv syscall.Timeval) time.Duration {
+ return time.Duration(tv.Sec)*time.Second + time.Duration(tv.Usec)*time.Microsecond
+}
+
+func get() (Rusage, error) {
+ var rusage syscall.Rusage
+ err := syscall.Getrusage(syscall.RUSAGE_CHILDREN, &rusage)
+ if err != nil {
+ return Rusage{}, errors.Wrapf(err, "error getting resource usage")
+ }
+ r := Rusage{
+ Date: time.Now(),
+ Utime: mkduration(rusage.Utime),
+ Stime: mkduration(rusage.Stime),
+ Inblock: int64(rusage.Inblock), // nolint: unconvert
+ Outblock: int64(rusage.Oublock), // nolint: unconvert
+ }
+ return r, nil
+}
+
+// Supported returns true if resource usage counters are supported on this OS.
+func Supported() bool {
+ return true
+}
diff --git a/vendor/github.com/containers/buildah/pkg/rusage/rusage_unsupported.go b/vendor/github.com/containers/buildah/pkg/rusage/rusage_unsupported.go
new file mode 100644
index 000000000..031c81402
--- /dev/null
+++ b/vendor/github.com/containers/buildah/pkg/rusage/rusage_unsupported.go
@@ -0,0 +1,18 @@
+// +build windows
+
+package rusage
+
+import (
+ "syscall"
+
+ "github.com/pkg/errors"
+)
+
+func get() (Rusage, error) {
+ return Rusage{}, errors.Wrapf(syscall.ENOTSUP, "error getting resource usage")
+}
+
+// Supported returns true if resource usage counters are supported on this OS.
+func Supported() bool {
+ return false
+}
diff --git a/vendor/github.com/containers/buildah/pull.go b/vendor/github.com/containers/buildah/pull.go
index f8d4bdeb6..be9a521d1 100644
--- a/vendor/github.com/containers/buildah/pull.go
+++ b/vendor/github.com/containers/buildah/pull.go
@@ -280,7 +280,7 @@ func pullImage(ctx context.Context, store storage.Store, srcRef types.ImageRefer
}()
logrus.Debugf("copying %q to %q", transports.ImageName(srcRef), destName)
- if _, err := retryCopyImage(ctx, policyContext, maybeCachedDestRef, srcRef, srcRef, "pull", getCopyOptions(store, options.ReportWriter, sc, nil, "", options.RemoveSignatures, "", nil, nil, options.OciDecryptConfig), options.MaxRetries, options.RetryDelay); err != nil {
+ if _, err := retryCopyImage(ctx, policyContext, maybeCachedDestRef, srcRef, srcRef, getCopyOptions(store, options.ReportWriter, sc, nil, "", options.RemoveSignatures, "", nil, nil, options.OciDecryptConfig), options.MaxRetries, options.RetryDelay); err != nil {
logrus.Debugf("error copying src image [%q] to dest image [%q] err: %v", transports.ImageName(srcRef), destName, err)
return nil, err
}
diff --git a/vendor/github.com/containers/buildah/run_linux.go b/vendor/github.com/containers/buildah/run_linux.go
index e21e3cd91..d83b3a5cc 100644
--- a/vendor/github.com/containers/buildah/run_linux.go
+++ b/vendor/github.com/containers/buildah/run_linux.go
@@ -316,7 +316,7 @@ func addCommonOptsToSpec(commonOpts *CommonBuildOptions, g *generate.Generator)
return nil
}
-func runSetupBuiltinVolumes(mountLabel, mountPoint, containerDir string, copyWithTar func(srcPath, dstPath string) error, builtinVolumes []string, rootUID, rootGID int) ([]specs.Mount, error) {
+func runSetupBuiltinVolumes(mountLabel, mountPoint, containerDir string, builtinVolumes []string, rootUID, rootGID int) ([]specs.Mount, error) {
var mounts []specs.Mount
hostOwner := idtools.IDPair{UID: rootUID, GID: rootGID}
// Add temporary copies of the contents of volume locations at the
@@ -359,7 +359,7 @@ func runSetupBuiltinVolumes(mountLabel, mountPoint, containerDir string, copyWit
if err = os.Chown(volumePath, int(stat.Sys().(*syscall.Stat_t).Uid), int(stat.Sys().(*syscall.Stat_t).Gid)); err != nil {
return nil, errors.Wrapf(err, "error chowning directory %q for volume %q", volumePath, volume)
}
- if err = copyWithTar(srcPath, volumePath); err != nil && !os.IsNotExist(errors.Cause(err)) {
+ if err = extractWithTar(mountPoint, srcPath, volumePath); err != nil && !os.IsNotExist(errors.Cause(err)) {
return nil, errors.Wrapf(err, "error populating directory %q for volume %q using contents of %q", volumePath, volume, srcPath)
}
}
@@ -483,8 +483,7 @@ func (b *Builder) setupMounts(mountPoint string, spec *specs.Spec, bundlePath st
// Add temporary copies of the contents of volume locations at the
// volume locations, unless we already have something there.
- copyWithTar := b.copyWithTar(nil, nil, nil, false)
- builtins, err := runSetupBuiltinVolumes(b.MountLabel, mountPoint, cdir, copyWithTar, builtinVolumes, int(rootUID), int(rootGID))
+ builtins, err := runSetupBuiltinVolumes(b.MountLabel, mountPoint, cdir, builtinVolumes, int(rootUID), int(rootGID))
if err != nil {
return err
}
@@ -864,12 +863,12 @@ func runUsingRuntime(isolation Isolation, options RunOptions, configureNetwork b
stat := exec.Command(runtime, args...)
stat.Dir = bundlePath
stat.Stderr = os.Stderr
- stateOutput, stateErr := stat.Output()
- if stateErr != nil {
- return 1, errors.Wrapf(stateErr, "error reading container state")
+ stateOutput, err := stat.Output()
+ if err != nil {
+ return 1, errors.Wrapf(err, "error reading container state (got output: %q)", string(stateOutput))
}
if err = json.Unmarshal(stateOutput, &state); err != nil {
- return 1, errors.Wrapf(stateErr, "error parsing container state %q", string(stateOutput))
+ return 1, errors.Wrapf(err, "error parsing container state %q", string(stateOutput))
}
switch state.Status {
case "running":
diff --git a/vendor/github.com/containers/buildah/seccomp.go b/vendor/github.com/containers/buildah/seccomp.go
index a435b5f71..fc7811098 100644
--- a/vendor/github.com/containers/buildah/seccomp.go
+++ b/vendor/github.com/containers/buildah/seccomp.go
@@ -5,9 +5,9 @@ package buildah
import (
"io/ioutil"
+ "github.com/containers/common/pkg/seccomp"
"github.com/opencontainers/runtime-spec/specs-go"
"github.com/pkg/errors"
- seccomp "github.com/seccomp/containers-golang"
)
func setupSeccomp(spec *specs.Spec, seccompProfilePath string) error {
diff --git a/vendor/github.com/containers/buildah/selinux.go b/vendor/github.com/containers/buildah/selinux.go
index e64eb6112..fea863165 100644
--- a/vendor/github.com/containers/buildah/selinux.go
+++ b/vendor/github.com/containers/buildah/selinux.go
@@ -7,6 +7,10 @@ import (
selinux "github.com/opencontainers/selinux/go-selinux"
)
+func selinuxGetEnabled() bool {
+ return selinux.GetEnabled()
+}
+
func setupSelinux(g *generate.Generator, processLabel, mountLabel string) {
if processLabel != "" && selinux.GetEnabled() {
g.SetProcessSelinuxLabel(processLabel)
diff --git a/vendor/github.com/containers/buildah/selinux_unsupported.go b/vendor/github.com/containers/buildah/selinux_unsupported.go
index 0aa7c46e4..fb9213e29 100644
--- a/vendor/github.com/containers/buildah/selinux_unsupported.go
+++ b/vendor/github.com/containers/buildah/selinux_unsupported.go
@@ -6,5 +6,9 @@ import (
"github.com/opencontainers/runtime-tools/generate"
)
+func selinuxGetEnabled() bool {
+ return false
+}
+
func setupSelinux(g *generate.Generator, processLabel, mountLabel string) {
}
diff --git a/vendor/github.com/containers/buildah/util.go b/vendor/github.com/containers/buildah/util.go
index f95c5ba57..4b5a00e44 100644
--- a/vendor/github.com/containers/buildah/util.go
+++ b/vendor/github.com/containers/buildah/util.go
@@ -1,26 +1,20 @@
package buildah
import (
- "archive/tar"
"io"
- "io/ioutil"
"os"
"path/filepath"
+ "sync"
- "github.com/containers/buildah/util"
+ "github.com/containers/buildah/copier"
"github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/pkg/sysregistriesv2"
"github.com/containers/image/v5/types"
"github.com/containers/storage"
- "github.com/containers/storage/pkg/archive"
- "github.com/containers/storage/pkg/chrootarchive"
"github.com/containers/storage/pkg/idtools"
- "github.com/containers/storage/pkg/pools"
"github.com/containers/storage/pkg/reexec"
- "github.com/containers/storage/pkg/system"
v1 "github.com/opencontainers/image-spec/specs-go/v1"
rspec "github.com/opencontainers/runtime-spec/specs-go"
- selinux "github.com/opencontainers/selinux/go-selinux"
"github.com/opencontainers/selinux/go-selinux/label"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
@@ -109,245 +103,6 @@ func convertRuntimeIDMaps(UIDMap, GIDMap []rspec.LinuxIDMapping) ([]idtools.IDMa
return uidmap, gidmap
}
-// copyFileWithTar returns a function which copies a single file from outside
-// of any container, or another container, into our working container, mapping
-// read permissions using the passed-in ID maps, writing using the container's
-// ID mappings, possibly overridden using the passed-in chownOpts
-func (b *Builder) copyFileWithTar(tarIDMappingOptions *IDMappingOptions, chownOpts *idtools.IDPair, hasher io.Writer, dryRun bool) func(src, dest string) error {
- if tarIDMappingOptions == nil {
- tarIDMappingOptions = &IDMappingOptions{
- HostUIDMapping: true,
- HostGIDMapping: true,
- }
- }
-
- var hardlinkChecker util.HardlinkChecker
- return func(src, dest string) error {
- var f *os.File
-
- logrus.Debugf("copyFileWithTar(%s, %s)", src, dest)
- fi, err := os.Lstat(src)
- if err != nil {
- return errors.Wrapf(err, "error reading attributes of %q", src)
- }
-
- sysfi, err := system.Lstat(src)
- if err != nil {
- return errors.Wrapf(err, "error reading attributes of %q", src)
- }
-
- hostUID := sysfi.UID()
- hostGID := sysfi.GID()
- containerUID, containerGID, err := util.GetContainerIDs(tarIDMappingOptions.UIDMap, tarIDMappingOptions.GIDMap, hostUID, hostGID)
- if err != nil {
- return errors.Wrapf(err, "error mapping owner IDs of %q: %d/%d", src, hostUID, hostGID)
- }
-
- hdr, err := tar.FileInfoHeader(fi, filepath.Base(src))
- if err != nil {
- return errors.Wrapf(err, "error generating tar header for: %q", src)
- }
- chrootedDest, err := filepath.Rel(b.MountPoint, dest)
- if err != nil {
- return errors.Wrapf(err, "error generating relative-to-chroot target name for %q", dest)
- }
- hdr.Name = chrootedDest
- hdr.Uid = int(containerUID)
- hdr.Gid = int(containerGID)
-
- if fi.Mode().IsRegular() && hdr.Typeflag == tar.TypeReg {
- if linkname := hardlinkChecker.Check(fi); linkname != "" {
- hdr.Typeflag = tar.TypeLink
- hdr.Linkname = linkname
- } else {
- hardlinkChecker.Add(fi, chrootedDest)
- f, err = os.Open(src)
- if err != nil {
- return errors.Wrapf(err, "error opening %q to copy its contents", src)
- }
- }
- }
-
- if fi.Mode()&os.ModeSymlink == os.ModeSymlink && hdr.Typeflag == tar.TypeSymlink {
- hdr.Typeflag = tar.TypeSymlink
- linkName, err := os.Readlink(src)
- if err != nil {
- return errors.Wrapf(err, "error reading destination from symlink %q", src)
- }
- hdr.Linkname = linkName
- }
-
- pipeReader, pipeWriter := io.Pipe()
- writer := tar.NewWriter(pipeWriter)
- var copyErr error
- go func(srcFile *os.File) {
- err := writer.WriteHeader(hdr)
- if err != nil {
- logrus.Debugf("error writing header for %s: %v", srcFile.Name(), err)
- copyErr = err
- }
- if srcFile != nil {
- n, err := pools.Copy(writer, srcFile)
- if n != hdr.Size {
- logrus.Debugf("expected to write %d bytes for %s, wrote %d instead", hdr.Size, srcFile.Name(), n)
- }
- if err != nil {
- logrus.Debugf("error copying contents of %s: %v", fi.Name(), err)
- copyErr = err
- }
- if err = srcFile.Close(); err != nil {
- logrus.Debugf("error closing %s: %v", fi.Name(), err)
- }
- }
- if err = writer.Close(); err != nil {
- logrus.Debugf("error closing write pipe for %s: %v", hdr.Name, err)
- }
- pipeWriter.Close()
- pipeWriter = nil
- }(f)
-
- untar := b.untar(chownOpts, hasher, dryRun)
- err = untar(pipeReader, b.MountPoint)
- if err == nil {
- err = copyErr
- }
- if pipeWriter != nil {
- pipeWriter.Close()
- }
- return err
- }
-}
-
-// copyWithTar returns a function which copies a directory tree from outside of
-// our container or from another container, into our working container, mapping
-// permissions at read-time using the container's ID maps, with ownership at
-// write-time possibly overridden using the passed-in chownOpts
-func (b *Builder) copyWithTar(tarIDMappingOptions *IDMappingOptions, chownOpts *idtools.IDPair, hasher io.Writer, dryRun bool) func(src, dest string) error {
- tar := b.tarPath(tarIDMappingOptions)
- return func(src, dest string) error {
- thisHasher := hasher
- if thisHasher != nil && b.ContentDigester.Hash() != nil {
- thisHasher = io.MultiWriter(thisHasher, b.ContentDigester.Hash())
- }
- if thisHasher == nil {
- thisHasher = b.ContentDigester.Hash()
- }
- untar := b.untar(chownOpts, thisHasher, dryRun)
- rc, err := tar(src)
- if err != nil {
- return errors.Wrapf(err, "error archiving %q for copy", src)
- }
- return untar(rc, dest)
- }
-}
-
-// untarPath returns a function which extracts an archive in a specified
-// location into our working container, mapping permissions using the
-// container's ID maps, possibly overridden using the passed-in chownOpts
-func (b *Builder) untarPath(chownOpts *idtools.IDPair, hasher io.Writer, dryRun bool) func(src, dest string) error {
- convertedUIDMap, convertedGIDMap := convertRuntimeIDMaps(b.IDMappingOptions.UIDMap, b.IDMappingOptions.GIDMap)
- if dryRun {
- return func(src, dest string) error {
- thisHasher := hasher
- if thisHasher != nil && b.ContentDigester.Hash() != nil {
- thisHasher = io.MultiWriter(thisHasher, b.ContentDigester.Hash())
- }
- if thisHasher == nil {
- thisHasher = b.ContentDigester.Hash()
- }
- f, err := os.Open(src)
- if err != nil {
- return errors.Wrapf(err, "error opening %q", src)
- }
- defer f.Close()
- _, err = io.Copy(thisHasher, f)
- return err
- }
- }
- return func(src, dest string) error {
- thisHasher := hasher
- if thisHasher != nil && b.ContentDigester.Hash() != nil {
- thisHasher = io.MultiWriter(thisHasher, b.ContentDigester.Hash())
- }
- if thisHasher == nil {
- thisHasher = b.ContentDigester.Hash()
- }
- untarPathAndChown := chrootarchive.UntarPathAndChown(chownOpts, thisHasher, convertedUIDMap, convertedGIDMap)
- return untarPathAndChown(src, dest)
- }
-}
-
-// tarPath returns a function which creates an archive of a specified location,
-// which is often somewhere in the container's filesystem, mapping permissions
-// using the container's ID maps, or the passed-in maps if specified
-func (b *Builder) tarPath(idMappingOptions *IDMappingOptions) func(path string) (io.ReadCloser, error) {
- var uidmap, gidmap []idtools.IDMap
- if idMappingOptions == nil {
- idMappingOptions = &IDMappingOptions{
- HostUIDMapping: true,
- HostGIDMapping: true,
- }
- }
- convertedUIDMap, convertedGIDMap := convertRuntimeIDMaps(idMappingOptions.UIDMap, idMappingOptions.GIDMap)
- tarMappings := idtools.NewIDMappingsFromMaps(convertedUIDMap, convertedGIDMap)
- uidmap = tarMappings.UIDs()
- gidmap = tarMappings.GIDs()
- options := &archive.TarOptions{
- Compression: archive.Uncompressed,
- UIDMaps: uidmap,
- GIDMaps: gidmap,
- }
- return func(path string) (io.ReadCloser, error) {
- return archive.TarWithOptions(path, options)
- }
-}
-
-// untar returns a function which extracts an archive stream to a specified
-// location in the container's filesystem, mapping permissions using the
-// container's ID maps, possibly overridden using the passed-in chownOpts
-func (b *Builder) untar(chownOpts *idtools.IDPair, hasher io.Writer, dryRun bool) func(tarArchive io.ReadCloser, dest string) error {
- convertedUIDMap, convertedGIDMap := convertRuntimeIDMaps(b.IDMappingOptions.UIDMap, b.IDMappingOptions.GIDMap)
- untarMappings := idtools.NewIDMappingsFromMaps(convertedUIDMap, convertedGIDMap)
- options := &archive.TarOptions{
- UIDMaps: untarMappings.UIDs(),
- GIDMaps: untarMappings.GIDs(),
- ChownOpts: chownOpts,
- }
- untar := chrootarchive.Untar
- if dryRun {
- untar = func(tarArchive io.Reader, dest string, options *archive.TarOptions) error {
- if _, err := io.Copy(ioutil.Discard, tarArchive); err != nil {
- return errors.Wrapf(err, "error digesting tar stream")
- }
- return nil
- }
- }
- originalUntar := untar
- untarWithHasher := func(tarArchive io.Reader, dest string, options *archive.TarOptions, untarHasher io.Writer) error {
- reader := tarArchive
- if untarHasher != nil {
- reader = io.TeeReader(tarArchive, untarHasher)
- }
- return originalUntar(reader, dest, options)
- }
- return func(tarArchive io.ReadCloser, dest string) error {
- thisHasher := hasher
- if thisHasher != nil && b.ContentDigester.Hash() != nil {
- thisHasher = io.MultiWriter(thisHasher, b.ContentDigester.Hash())
- }
- if thisHasher == nil {
- thisHasher = b.ContentDigester.Hash()
- }
- err := untarWithHasher(tarArchive, dest, options, thisHasher)
- if err2 := tarArchive.Close(); err2 != nil {
- if err == nil {
- err = err2
- }
- }
- return err
- }
-}
-
// isRegistryBlocked checks if the named registry is marked as blocked
func isRegistryBlocked(registry string, sc *types.SystemContext) (bool, error) {
reginfo, err := sysregistriesv2.FindRegistry(sc, registry)
@@ -389,10 +144,10 @@ func isReferenceBlocked(ref types.ImageReference, sc *types.SystemContext) (bool
return false, nil
}
-// ReserveSELinuxLabels reads containers storage and reserves SELinux containers
-// fall all existing buildah containers
+// ReserveSELinuxLabels reads containers storage and reserves SELinux contexts
+// which are already being used by buildah containers.
func ReserveSELinuxLabels(store storage.Store, id string) error {
- if selinux.GetEnabled() {
+ if selinuxGetEnabled() {
containers, err := store.Containers()
if err != nil {
return errors.Wrapf(err, "error getting list of containers")
@@ -438,3 +193,35 @@ func IsContainer(id string, store storage.Store) (bool, error) {
}
return true, nil
}
+
+// Copy content from the directory "src" to the directory "dest", ensuring that
+// content from outside of "root" (which is a parent of "src" or "src" itself)
+// isn't read.
+func extractWithTar(root, src, dest string) error {
+ var getErr, putErr error
+ var wg sync.WaitGroup
+
+ pipeReader, pipeWriter := io.Pipe()
+
+ wg.Add(1)
+ go func() {
+ getErr = copier.Get(root, src, copier.GetOptions{}, []string{"."}, pipeWriter)
+ pipeWriter.Close()
+ wg.Done()
+ }()
+ wg.Add(1)
+ go func() {
+ putErr = copier.Put(dest, dest, copier.PutOptions{}, pipeReader)
+ pipeReader.Close()
+ wg.Done()
+ }()
+ wg.Wait()
+
+ if getErr != nil {
+ return errors.Wrapf(getErr, "error reading %q", src)
+ }
+ if putErr != nil {
+ return errors.Wrapf(putErr, "error copying contents of %q to %q", src, dest)
+ }
+ return nil
+}
diff --git a/vendor/github.com/containers/common/pkg/config/config.go b/vendor/github.com/containers/common/pkg/config/config.go
index 78811b47a..568f43c17 100644
--- a/vendor/github.com/containers/common/pkg/config/config.go
+++ b/vendor/github.com/containers/common/pkg/config/config.go
@@ -658,10 +658,10 @@ func (c *NetworkConfig) Validate() error {
// ValidatePullPolicy check if the pullPolicy from CLI is valid and returns the valid enum type
// if the value from CLI or containers.conf is invalid returns the error
func ValidatePullPolicy(pullPolicy string) (PullPolicy, error) {
- switch pullPolicy {
+ switch strings.ToLower(pullPolicy) {
case "always":
return PullImageAlways, nil
- case "missing":
+ case "missing", "ifnotpresent":
return PullImageMissing, nil
case "never":
return PullImageNever, nil
diff --git a/vendor/github.com/containers/common/pkg/seccomp/seccomp_default_linux.go b/vendor/github.com/containers/common/pkg/seccomp/default_linux.go
index f12cf02c9..e2a2c68f1 100644
--- a/vendor/github.com/containers/common/pkg/seccomp/seccomp_default_linux.go
+++ b/vendor/github.com/containers/common/pkg/seccomp/default_linux.go
@@ -65,9 +65,11 @@ func DefaultProfile() *Seccomp {
"chmod",
"chown",
"chown32",
+ "clock_adjtime",
"clock_getres",
"clock_gettime",
"clock_nanosleep",
+ "clone",
"close",
"connect",
"copy_file_range",
@@ -89,6 +91,7 @@ func DefaultProfile() *Seccomp {
"exit",
"exit_group",
"faccessat",
+ "faccessat2",
"fadvise64",
"fadvise64_64",
"fallocate",
@@ -96,6 +99,7 @@ func DefaultProfile() *Seccomp {
"fchdir",
"fchmod",
"fchmodat",
+ "fchmodat2",
"fchown",
"fchown32",
"fchownat",
@@ -215,9 +219,11 @@ func DefaultProfile() *Seccomp {
"newfstatat",
"open",
"openat",
+ "openat2",
"pause",
"pipe",
"pipe2",
+ "pivot_root",
"poll",
"ppoll",
"prctl",
diff --git a/vendor/github.com/containers/common/pkg/seccomp/seccomp.json b/vendor/github.com/containers/common/pkg/seccomp/seccomp.json
index 06b39024a..a8b87175e 100644
--- a/vendor/github.com/containers/common/pkg/seccomp/seccomp.json
+++ b/vendor/github.com/containers/common/pkg/seccomp/seccomp.json
@@ -67,9 +67,11 @@
"chmod",
"chown",
"chown32",
+ "clock_adjtime",
"clock_getres",
"clock_gettime",
"clock_nanosleep",
+ "clone",
"close",
"connect",
"copy_file_range",
@@ -91,6 +93,7 @@
"exit",
"exit_group",
"faccessat",
+ "faccessat2",
"fadvise64",
"fadvise64_64",
"fallocate",
@@ -98,6 +101,7 @@
"fchdir",
"fchmod",
"fchmodat",
+ "fchmodat2",
"fchown",
"fchown32",
"fchownat",
@@ -217,9 +221,11 @@
"newfstatat",
"open",
"openat",
+ "openat2",
"pause",
"pipe",
"pipe2",
+ "pivot_root",
"poll",
"ppoll",
"prctl",
@@ -875,4 +881,4 @@
"excludes": {}
}
]
-} \ No newline at end of file
+}
diff --git a/vendor/github.com/containers/common/version/version.go b/vendor/github.com/containers/common/version/version.go
index 1f05ea3d9..7fcbf40a1 100644
--- a/vendor/github.com/containers/common/version/version.go
+++ b/vendor/github.com/containers/common/version/version.go
@@ -1,4 +1,4 @@
package version
// Version is the version of the build.
-const Version = "0.21.0"
+const Version = "0.22.0"
diff --git a/vendor/github.com/seccomp/containers-golang/.gitignore b/vendor/github.com/seccomp/containers-golang/.gitignore
deleted file mode 100644
index e433eef88..000000000
--- a/vendor/github.com/seccomp/containers-golang/.gitignore
+++ /dev/null
@@ -1,2 +0,0 @@
-*.orig
-generate
diff --git a/vendor/github.com/seccomp/containers-golang/LICENSE b/vendor/github.com/seccomp/containers-golang/LICENSE
deleted file mode 100644
index bd465fcf0..000000000
--- a/vendor/github.com/seccomp/containers-golang/LICENSE
+++ /dev/null
@@ -1,190 +0,0 @@
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- Copyright 2018-2019 github.com/seccomp authors.
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
diff --git a/vendor/github.com/seccomp/containers-golang/Makefile b/vendor/github.com/seccomp/containers-golang/Makefile
deleted file mode 100644
index 2d91917f9..000000000
--- a/vendor/github.com/seccomp/containers-golang/Makefile
+++ /dev/null
@@ -1,32 +0,0 @@
-export GO111MODULE=off
-
-TAGS ?= seccomp
-BUILDFLAGS := -tags "$(AUTOTAGS) $(TAGS)"
-GO := go
-PACKAGE := github.com/seccomp/containers-golang
-
-sources := $(wildcard *.go)
-
-.PHONY: seccomp.json
-seccomp.json: $(sources)
- $(GO) build -compiler gc $(BUILDFLAGS) ./cmd/generate.go
- $(GO) build -compiler gc ./cmd/generate.go
- $(GO) run ${BUILDFLAGS} cmd/generate.go
-
-all: seccomp.json
-
-.PHONY: test-unit
-test-unit:
- $(GO) test -v $(BUILDFLAGS) $(shell $(GO) list ./... | grep -v ^$(PACKAGE)/vendor)
- $(GO) test -v $(shell $(GO) list ./... | grep -v ^$(PACKAGE)/vendor)
-
-.PHONY: vendor
-vendor:
- export GO111MODULE=on \
- $(GO) mod tidy && \
- $(GO) mod vendor && \
- $(GO) mod verify
-
-.PHONY: clean
-clean:
- rm -f generate
diff --git a/vendor/github.com/seccomp/containers-golang/README.md b/vendor/github.com/seccomp/containers-golang/README.md
deleted file mode 100644
index a44238432..000000000
--- a/vendor/github.com/seccomp/containers-golang/README.md
+++ /dev/null
@@ -1,29 +0,0 @@
-# containers-golang
-
-[![CircleCI](https://circleci.com/gh/seccomp/containers-golang.svg?style=shield)](https://circleci.com/gh/seccomp/containers-golang)
-
-`containers-golang` is a set of Go libraries used by container runtimes to generate and load seccomp mappings into the kernel.
-
-seccomp (short for secure computing mode) is a BPF based syscall filter language and present a more conventional function-call based filtering interface that should be familiar to, and easily adopted by, application developers.
-
-## Building
- make - Generates seccomp.json file, which contains the whitelisted syscalls that can be used by container runtime engines like [CRI-O][cri-o], [Buildah][buildah], [Podman][podman] and [Docker][docker], and container runtimes like OCI [Runc][runc] to controll the syscalls available to containers.
-
-### Supported build tags
-
- `seccomp`
-
-## Contributing
-
-When developing this library, please use `make` (or `make … BUILDTAGS=…`) to take advantage of the tests and validation.
-
-## Contact
-
-- IRC: #[containers](irc://irc.freenode.net:6667/#containers) on freenode.net
-
-[cri-o]: https://github.com/kubernetes-incubator/cri-o/pulls
-[buildah]: https://github.com/projectatomic/buildah
-[podman]: https://github.com/projectatomic/podman
-[docker]: https://github.com/docker/docker
-[runc]: https://github.com/opencontainers/runc
-
diff --git a/vendor/github.com/seccomp/containers-golang/conversion.go b/vendor/github.com/seccomp/containers-golang/conversion.go
deleted file mode 100644
index 05564487b..000000000
--- a/vendor/github.com/seccomp/containers-golang/conversion.go
+++ /dev/null
@@ -1,32 +0,0 @@
-package seccomp // import "github.com/seccomp/containers-golang"
-
-import "fmt"
-
-var goArchToSeccompArchMap = map[string]Arch{
- "386": ArchX86,
- "amd64": ArchX86_64,
- "amd64p32": ArchX32,
- "arm": ArchARM,
- "arm64": ArchAARCH64,
- "mips": ArchMIPS,
- "mips64": ArchMIPS64,
- "mips64le": ArchMIPSEL64,
- "mips64p32": ArchMIPS64N32,
- "mips64p32le": ArchMIPSEL64N32,
- "mipsle": ArchMIPSEL,
- "ppc": ArchPPC,
- "ppc64": ArchPPC64,
- "ppc64le": ArchPPC64LE,
- "s390": ArchS390,
- "s390x": ArchS390X,
-}
-
-// GoArchToSeccompArch converts a runtime.GOARCH to a seccomp `Arch`. The
-// function returns an error if the architecture conversion is not supported.
-func GoArchToSeccompArch(goArch string) (Arch, error) {
- arch, ok := goArchToSeccompArchMap[goArch]
- if !ok {
- return "", fmt.Errorf("unsupported go arch provided: %s", goArch)
- }
- return arch, nil
-}
diff --git a/vendor/github.com/seccomp/containers-golang/go.mod b/vendor/github.com/seccomp/containers-golang/go.mod
deleted file mode 100644
index 8e21f0f99..000000000
--- a/vendor/github.com/seccomp/containers-golang/go.mod
+++ /dev/null
@@ -1,16 +0,0 @@
-module github.com/seccomp/containers-golang
-
-go 1.14
-
-require (
- github.com/blang/semver v3.5.1+incompatible // indirect
- github.com/hashicorp/go-multierror v1.1.0 // indirect
- github.com/opencontainers/runtime-spec v1.0.3-0.20200710190001-3e4195d92445
- github.com/opencontainers/runtime-tools v0.9.0
- github.com/opencontainers/selinux v1.6.0 // indirect
- github.com/seccomp/libseccomp-golang v0.9.1
- github.com/sirupsen/logrus v1.6.0 // indirect
- github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2 // indirect
- github.com/xeipuuv/gojsonschema v1.2.0 // indirect
- golang.org/x/sys v0.0.0-20200720211630-cb9d2d5c5666
-)
diff --git a/vendor/github.com/seccomp/containers-golang/go.sum b/vendor/github.com/seccomp/containers-golang/go.sum
deleted file mode 100644
index d7fc538c0..000000000
--- a/vendor/github.com/seccomp/containers-golang/go.sum
+++ /dev/null
@@ -1,66 +0,0 @@
-github.com/blang/semver v1.1.0 h1:ol1rO7QQB5uy7umSNV7VAmLugfLRD+17sYJujRNYPhg=
-github.com/blang/semver v3.5.1+incompatible h1:cQNTCjp13qL8KC3Nbxr/y2Bqb63oX6wdnnjpJbkM4JQ=
-github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
-github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA=
-github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
-github.com/hashicorp/go-multierror v1.0.0 h1:iVjPR7a6H0tWELX5NxNe7bYopibicUzc7uPribsnS6o=
-github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk=
-github.com/hashicorp/go-multierror v1.1.0 h1:B9UzwGQJehnUY1yNrnwREHc3fGbC2xefo8g4TbElacI=
-github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA=
-github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk=
-github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
-github.com/konsorten/go-windows-terminal-sequences v1.0.3 h1:CE8S1cTafDpPvMhIxNJKvHsGVBgn1xWYf1NbHQhywc8=
-github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
-github.com/opencontainers/runtime-spec v0.1.2-0.20190618234442-a950415649c7 h1:Dliu5QO+4JYWu/yMshaMU7G3JN2POGpwjJN7gjy10Go=
-github.com/opencontainers/runtime-spec v0.1.2-0.20190618234442-a950415649c7/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
-github.com/opencontainers/runtime-spec v1.0.1 h1:wY4pOY8fBdSIvs9+IDHC55thBuEulhzfSgKeC1yFvzQ=
-github.com/opencontainers/runtime-spec v1.0.1/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
-github.com/opencontainers/runtime-spec v1.0.2-0.20191007145322-19e92ca81777 h1:7CkKaORyxoXsM8z56r+M0wf3uCpVGVqx4CWq7oJ/4DY=
-github.com/opencontainers/runtime-spec v1.0.2-0.20191007145322-19e92ca81777/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
-github.com/opencontainers/runtime-spec v1.0.3-0.20200520003142-237cc4f519e2 h1:9mv9SC7GWmRWE0J/+oD8w3GsN2KYGKtg6uwLN7hfP5E=
-github.com/opencontainers/runtime-spec v1.0.3-0.20200520003142-237cc4f519e2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
-github.com/opencontainers/runtime-spec v1.0.3-0.20200710190001-3e4195d92445 h1:y8cfsJRmn8g3VkM4IDpusKSgMUZEXhudm/BuYANLozE=
-github.com/opencontainers/runtime-spec v1.0.3-0.20200710190001-3e4195d92445/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
-github.com/opencontainers/runtime-tools v0.9.0 h1:FYgwVsKRI/H9hU32MJ/4MLOzXWodKK5zsQavY8NPMkU=
-github.com/opencontainers/runtime-tools v0.9.0/go.mod h1:r3f7wjNzSs2extwzU3Y+6pKfobzPh+kKFJ3ofN+3nfs=
-github.com/opencontainers/selinux v1.2.2 h1:Kx9J6eDG5/24A6DtUquGSpJQ+m2MUTahn4FtGEe8bFg=
-github.com/opencontainers/selinux v1.2.2/go.mod h1:+BLncwf63G4dgOzykXAxcmnFlUaOlkDdmw/CqsW6pjs=
-github.com/opencontainers/selinux v1.3.0 h1:xsI95WzPZu5exzA6JzkLSfdr/DilzOhCJOqGe5TgR0g=
-github.com/opencontainers/selinux v1.3.0/go.mod h1:+BLncwf63G4dgOzykXAxcmnFlUaOlkDdmw/CqsW6pjs=
-github.com/opencontainers/selinux v1.6.0 h1:+bIAS/Za3q5FTwWym4fTB0vObnfCf3G/NC7K6Jx62mY=
-github.com/opencontainers/selinux v1.6.0/go.mod h1:VVGKuOLlE7v4PJyT6h7mNWvq1rzqiriPsEqVhc+svHE=
-github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
-github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
-github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
-github.com/seccomp/libseccomp-golang v0.9.1 h1:NJjM5DNFOs0s3kYE1WUOr6G8V97sdt46rlXTMfXGWBo=
-github.com/seccomp/libseccomp-golang v0.9.1/go.mod h1:GbW5+tmTXfcxTToHLXlScSlAvWlF4P2Ca7zGrPiEpWo=
-github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4=
-github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
-github.com/sirupsen/logrus v1.6.0 h1:UBcNElsrwanuuMsnGSlYmtmgbb23qDR5dG+6X6Oo89I=
-github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
-github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
-github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
-github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
-github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
-github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2 h1:b6uOv7YOFK0TYG7HtkIgExQo+2RdLuwRft63jn2HWj8=
-github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
-github.com/willf/bitset v1.1.11-0.20200630133818-d5bec3311243 h1:R43TdZy32XXSXjJn7M/HhALJ9imq6ztLnChfYJpVDnM=
-github.com/willf/bitset v1.1.11-0.20200630133818-d5bec3311243/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4=
-github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f h1:J9EGpcZtP0E/raorCMxlFGSTBrsSlaDGf3jU/qvAE2c=
-github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
-github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 h1:EzJWgHovont7NscjpAxXsDA8S8BMYve8Y5+7cuRE7R0=
-github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ=
-github.com/xeipuuv/gojsonschema v1.1.0 h1:ngVtJC9TY/lg0AA/1k48FYhBrhRoFlEmWzsehpNAaZg=
-github.com/xeipuuv/gojsonschema v1.1.0/go.mod h1:5yf86TLmAcydyeJq5YvxkGPE2fm/u4myDekKRoLuqhs=
-github.com/xeipuuv/gojsonschema v1.2.0 h1:LhYJRs+L4fBtjZUfuSZIKGeVu0QRy8e5Xi7D17UxZ74=
-github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y=
-golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0 h1:HyfiK1WMnHj5FXFXatD+Qs1A/xC2Run6RzeW1SyHxpc=
-golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190921190940-14da1ac737cc h1:EinpED/Eb9JUgDi6pkoFjw+tz69c3lHUZr2+Va84S0w=
-golang.org/x/sys v0.0.0-20190921190940-14da1ac737cc/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191115151921-52ab43148777/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200720211630-cb9d2d5c5666 h1:gVCS+QOncANNPlmlO1AhlU3oxs4V9z+gTtPwIk3p2N8=
-golang.org/x/sys v0.0.0-20200720211630-cb9d2d5c5666/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
diff --git a/vendor/github.com/seccomp/containers-golang/seccomp.json b/vendor/github.com/seccomp/containers-golang/seccomp.json
deleted file mode 100644
index 06b39024a..000000000
--- a/vendor/github.com/seccomp/containers-golang/seccomp.json
+++ /dev/null
@@ -1,878 +0,0 @@
-{
- "defaultAction": "SCMP_ACT_ERRNO",
- "archMap": [
- {
- "architecture": "SCMP_ARCH_X86_64",
- "subArchitectures": [
- "SCMP_ARCH_X86",
- "SCMP_ARCH_X32"
- ]
- },
- {
- "architecture": "SCMP_ARCH_AARCH64",
- "subArchitectures": [
- "SCMP_ARCH_ARM"
- ]
- },
- {
- "architecture": "SCMP_ARCH_MIPS64",
- "subArchitectures": [
- "SCMP_ARCH_MIPS",
- "SCMP_ARCH_MIPS64N32"
- ]
- },
- {
- "architecture": "SCMP_ARCH_MIPS64N32",
- "subArchitectures": [
- "SCMP_ARCH_MIPS",
- "SCMP_ARCH_MIPS64"
- ]
- },
- {
- "architecture": "SCMP_ARCH_MIPSEL64",
- "subArchitectures": [
- "SCMP_ARCH_MIPSEL",
- "SCMP_ARCH_MIPSEL64N32"
- ]
- },
- {
- "architecture": "SCMP_ARCH_MIPSEL64N32",
- "subArchitectures": [
- "SCMP_ARCH_MIPSEL",
- "SCMP_ARCH_MIPSEL64"
- ]
- },
- {
- "architecture": "SCMP_ARCH_S390X",
- "subArchitectures": [
- "SCMP_ARCH_S390"
- ]
- }
- ],
- "syscalls": [
- {
- "names": [
- "_llseek",
- "_newselect",
- "accept",
- "accept4",
- "access",
- "adjtimex",
- "alarm",
- "bind",
- "brk",
- "capget",
- "capset",
- "chdir",
- "chmod",
- "chown",
- "chown32",
- "clock_getres",
- "clock_gettime",
- "clock_nanosleep",
- "close",
- "connect",
- "copy_file_range",
- "creat",
- "dup",
- "dup2",
- "dup3",
- "epoll_create",
- "epoll_create1",
- "epoll_ctl",
- "epoll_ctl_old",
- "epoll_pwait",
- "epoll_wait",
- "epoll_wait_old",
- "eventfd",
- "eventfd2",
- "execve",
- "execveat",
- "exit",
- "exit_group",
- "faccessat",
- "fadvise64",
- "fadvise64_64",
- "fallocate",
- "fanotify_mark",
- "fchdir",
- "fchmod",
- "fchmodat",
- "fchown",
- "fchown32",
- "fchownat",
- "fcntl",
- "fcntl64",
- "fdatasync",
- "fgetxattr",
- "flistxattr",
- "flock",
- "fork",
- "fremovexattr",
- "fsetxattr",
- "fstat",
- "fstat64",
- "fstatat64",
- "fstatfs",
- "fstatfs64",
- "fsync",
- "ftruncate",
- "ftruncate64",
- "futex",
- "futimesat",
- "get_robust_list",
- "get_thread_area",
- "getcpu",
- "getcwd",
- "getdents",
- "getdents64",
- "getegid",
- "getegid32",
- "geteuid",
- "geteuid32",
- "getgid",
- "getgid32",
- "getgroups",
- "getgroups32",
- "getitimer",
- "getpeername",
- "getpgid",
- "getpgrp",
- "getpid",
- "getppid",
- "getpriority",
- "getrandom",
- "getresgid",
- "getresgid32",
- "getresuid",
- "getresuid32",
- "getrlimit",
- "getrusage",
- "getsid",
- "getsockname",
- "getsockopt",
- "gettid",
- "gettimeofday",
- "getuid",
- "getuid32",
- "getxattr",
- "inotify_add_watch",
- "inotify_init",
- "inotify_init1",
- "inotify_rm_watch",
- "io_cancel",
- "io_destroy",
- "io_getevents",
- "io_setup",
- "io_submit",
- "ioctl",
- "ioprio_get",
- "ioprio_set",
- "ipc",
- "kill",
- "lchown",
- "lchown32",
- "lgetxattr",
- "link",
- "linkat",
- "listen",
- "listxattr",
- "llistxattr",
- "lremovexattr",
- "lseek",
- "lsetxattr",
- "lstat",
- "lstat64",
- "madvise",
- "memfd_create",
- "mincore",
- "mkdir",
- "mkdirat",
- "mknod",
- "mknodat",
- "mlock",
- "mlock2",
- "mlockall",
- "mmap",
- "mmap2",
- "mount",
- "mprotect",
- "mq_getsetattr",
- "mq_notify",
- "mq_open",
- "mq_timedreceive",
- "mq_timedsend",
- "mq_unlink",
- "mremap",
- "msgctl",
- "msgget",
- "msgrcv",
- "msgsnd",
- "msync",
- "munlock",
- "munlockall",
- "munmap",
- "name_to_handle_at",
- "nanosleep",
- "newfstatat",
- "open",
- "openat",
- "pause",
- "pipe",
- "pipe2",
- "poll",
- "ppoll",
- "prctl",
- "pread64",
- "preadv",
- "preadv2",
- "prlimit64",
- "pselect6",
- "pwrite64",
- "pwritev",
- "pwritev2",
- "read",
- "readahead",
- "readlink",
- "readlinkat",
- "readv",
- "reboot",
- "recv",
- "recvfrom",
- "recvmmsg",
- "recvmsg",
- "remap_file_pages",
- "removexattr",
- "rename",
- "renameat",
- "renameat2",
- "restart_syscall",
- "rmdir",
- "rt_sigaction",
- "rt_sigpending",
- "rt_sigprocmask",
- "rt_sigqueueinfo",
- "rt_sigreturn",
- "rt_sigsuspend",
- "rt_sigtimedwait",
- "rt_tgsigqueueinfo",
- "sched_get_priority_max",
- "sched_get_priority_min",
- "sched_getaffinity",
- "sched_getattr",
- "sched_getparam",
- "sched_getscheduler",
- "sched_rr_get_interval",
- "sched_setaffinity",
- "sched_setattr",
- "sched_setparam",
- "sched_setscheduler",
- "sched_yield",
- "seccomp",
- "select",
- "semctl",
- "semget",
- "semop",
- "semtimedop",
- "send",
- "sendfile",
- "sendfile64",
- "sendmmsg",
- "sendmsg",
- "sendto",
- "set_robust_list",
- "set_thread_area",
- "set_tid_address",
- "setfsgid",
- "setfsgid32",
- "setfsuid",
- "setfsuid32",
- "setgid",
- "setgid32",
- "setgroups",
- "setgroups32",
- "setitimer",
- "setpgid",
- "setpriority",
- "setregid",
- "setregid32",
- "setresgid",
- "setresgid32",
- "setresuid",
- "setresuid32",
- "setreuid",
- "setreuid32",
- "setrlimit",
- "setsid",
- "setsockopt",
- "setuid",
- "setuid32",
- "setxattr",
- "shmat",
- "shmctl",
- "shmdt",
- "shmget",
- "shutdown",
- "sigaltstack",
- "signalfd",
- "signalfd4",
- "sigreturn",
- "socketcall",
- "socketpair",
- "splice",
- "stat",
- "stat64",
- "statfs",
- "statfs64",
- "statx",
- "symlink",
- "symlinkat",
- "sync",
- "sync_file_range",
- "syncfs",
- "sysinfo",
- "syslog",
- "tee",
- "tgkill",
- "time",
- "timer_create",
- "timer_delete",
- "timer_getoverrun",
- "timer_gettime",
- "timer_settime",
- "timerfd_create",
- "timerfd_gettime",
- "timerfd_settime",
- "times",
- "tkill",
- "truncate",
- "truncate64",
- "ugetrlimit",
- "umask",
- "umount",
- "umount2",
- "uname",
- "unlink",
- "unlinkat",
- "unshare",
- "utime",
- "utimensat",
- "utimes",
- "vfork",
- "vmsplice",
- "wait4",
- "waitid",
- "waitpid",
- "write",
- "writev"
- ],
- "action": "SCMP_ACT_ALLOW",
- "args": [],
- "comment": "",
- "includes": {},
- "excludes": {}
- },
- {
- "names": [
- "personality"
- ],
- "action": "SCMP_ACT_ALLOW",
- "args": [
- {
- "index": 0,
- "value": 0,
- "valueTwo": 0,
- "op": "SCMP_CMP_EQ"
- }
- ],
- "comment": "",
- "includes": {},
- "excludes": {}
- },
- {
- "names": [
- "personality"
- ],
- "action": "SCMP_ACT_ALLOW",
- "args": [
- {
- "index": 0,
- "value": 8,
- "valueTwo": 0,
- "op": "SCMP_CMP_EQ"
- }
- ],
- "comment": "",
- "includes": {},
- "excludes": {}
- },
- {
- "names": [
- "personality"
- ],
- "action": "SCMP_ACT_ALLOW",
- "args": [
- {
- "index": 0,
- "value": 131072,
- "valueTwo": 0,
- "op": "SCMP_CMP_EQ"
- }
- ],
- "comment": "",
- "includes": {},
- "excludes": {}
- },
- {
- "names": [
- "personality"
- ],
- "action": "SCMP_ACT_ALLOW",
- "args": [
- {
- "index": 0,
- "value": 131080,
- "valueTwo": 0,
- "op": "SCMP_CMP_EQ"
- }
- ],
- "comment": "",
- "includes": {},
- "excludes": {}
- },
- {
- "names": [
- "personality"
- ],
- "action": "SCMP_ACT_ALLOW",
- "args": [
- {
- "index": 0,
- "value": 4294967295,
- "valueTwo": 0,
- "op": "SCMP_CMP_EQ"
- }
- ],
- "comment": "",
- "includes": {},
- "excludes": {}
- },
- {
- "names": [
- "sync_file_range2"
- ],
- "action": "SCMP_ACT_ALLOW",
- "args": [],
- "comment": "",
- "includes": {
- "arches": [
- "ppc64le"
- ]
- },
- "excludes": {}
- },
- {
- "names": [
- "arm_fadvise64_64",
- "arm_sync_file_range",
- "sync_file_range2",
- "breakpoint",
- "cacheflush",
- "set_tls"
- ],
- "action": "SCMP_ACT_ALLOW",
- "args": [],
- "comment": "",
- "includes": {
- "arches": [
- "arm",
- "arm64"
- ]
- },
- "excludes": {}
- },
- {
- "names": [
- "arch_prctl"
- ],
- "action": "SCMP_ACT_ALLOW",
- "args": [],
- "comment": "",
- "includes": {
- "arches": [
- "amd64",
- "x32"
- ]
- },
- "excludes": {}
- },
- {
- "names": [
- "modify_ldt"
- ],
- "action": "SCMP_ACT_ALLOW",
- "args": [],
- "comment": "",
- "includes": {
- "arches": [
- "amd64",
- "x32",
- "x86"
- ]
- },
- "excludes": {}
- },
- {
- "names": [
- "s390_pci_mmio_read",
- "s390_pci_mmio_write",
- "s390_runtime_instr"
- ],
- "action": "SCMP_ACT_ALLOW",
- "args": [],
- "comment": "",
- "includes": {
- "arches": [
- "s390",
- "s390x"
- ]
- },
- "excludes": {}
- },
- {
- "names": [
- "open_by_handle_at"
- ],
- "action": "SCMP_ACT_ALLOW",
- "args": [],
- "comment": "",
- "includes": {
- "caps": [
- "CAP_DAC_READ_SEARCH"
- ]
- },
- "excludes": {}
- },
- {
- "names": [
- "bpf",
- "clone",
- "fanotify_init",
- "lookup_dcookie",
- "mount",
- "name_to_handle_at",
- "perf_event_open",
- "quotactl",
- "setdomainname",
- "sethostname",
- "setns",
- "umount",
- "umount2",
- "unshare"
- ],
- "action": "SCMP_ACT_ALLOW",
- "args": [],
- "comment": "",
- "includes": {
- "caps": [
- "CAP_SYS_ADMIN"
- ]
- },
- "excludes": {}
- },
- {
- "names": [
- "clone"
- ],
- "action": "SCMP_ACT_ALLOW",
- "args": [
- {
- "index": 0,
- "value": 2080505856,
- "valueTwo": 0,
- "op": "SCMP_CMP_MASKED_EQ"
- }
- ],
- "comment": "",
- "includes": {},
- "excludes": {
- "caps": [
- "CAP_SYS_ADMIN"
- ],
- "arches": [
- "s390",
- "s390x"
- ]
- }
- },
- {
- "names": [
- "clone"
- ],
- "action": "SCMP_ACT_ALLOW",
- "args": [
- {
- "index": 1,
- "value": 2080505856,
- "valueTwo": 0,
- "op": "SCMP_CMP_MASKED_EQ"
- }
- ],
- "comment": "s390 parameter ordering for clone is different",
- "includes": {
- "arches": [
- "s390",
- "s390x"
- ]
- },
- "excludes": {
- "caps": [
- "CAP_SYS_ADMIN"
- ]
- }
- },
- {
- "names": [
- "reboot"
- ],
- "action": "SCMP_ACT_ALLOW",
- "args": [],
- "comment": "",
- "includes": {
- "caps": [
- "CAP_SYS_BOOT"
- ]
- },
- "excludes": {}
- },
- {
- "names": [
- "chroot"
- ],
- "action": "SCMP_ACT_ALLOW",
- "args": [],
- "comment": "",
- "includes": {
- "caps": [
- "CAP_SYS_CHROOT"
- ]
- },
- "excludes": {}
- },
- {
- "names": [
- "delete_module",
- "init_module",
- "finit_module",
- "query_module"
- ],
- "action": "SCMP_ACT_ALLOW",
- "args": [],
- "comment": "",
- "includes": {
- "caps": [
- "CAP_SYS_MODULE"
- ]
- },
- "excludes": {}
- },
- {
- "names": [
- "get_mempolicy",
- "mbind",
- "name_to_handle_at",
- "set_mempolicy"
- ],
- "action": "SCMP_ACT_ALLOW",
- "args": [],
- "comment": "",
- "includes": {
- "caps": [
- "CAP_SYS_NICE"
- ]
- },
- "excludes": {}
- },
- {
- "names": [
- "acct"
- ],
- "action": "SCMP_ACT_ALLOW",
- "args": [],
- "comment": "",
- "includes": {
- "caps": [
- "CAP_SYS_PACCT"
- ]
- },
- "excludes": {}
- },
- {
- "names": [
- "kcmp",
- "process_vm_readv",
- "process_vm_writev",
- "ptrace"
- ],
- "action": "SCMP_ACT_ALLOW",
- "args": [],
- "comment": "",
- "includes": {
- "caps": [
- "CAP_SYS_PTRACE"
- ]
- },
- "excludes": {}
- },
- {
- "names": [
- "iopl",
- "ioperm"
- ],
- "action": "SCMP_ACT_ALLOW",
- "args": [],
- "comment": "",
- "includes": {
- "caps": [
- "CAP_SYS_RAWIO"
- ]
- },
- "excludes": {}
- },
- {
- "names": [
- "settimeofday",
- "stime",
- "clock_settime"
- ],
- "action": "SCMP_ACT_ALLOW",
- "args": [],
- "comment": "",
- "includes": {
- "caps": [
- "CAP_SYS_TIME"
- ]
- },
- "excludes": {}
- },
- {
- "names": [
- "vhangup"
- ],
- "action": "SCMP_ACT_ALLOW",
- "args": [],
- "comment": "",
- "includes": {
- "caps": [
- "CAP_SYS_TTY_CONFIG"
- ]
- },
- "excludes": {}
- },
- {
- "names": [
- "socket"
- ],
- "action": "SCMP_ACT_ERRNO",
- "args": [
- {
- "index": 0,
- "value": 16,
- "valueTwo": 0,
- "op": "SCMP_CMP_EQ"
- },
- {
- "index": 2,
- "value": 9,
- "valueTwo": 0,
- "op": "SCMP_CMP_EQ"
- }
- ],
- "comment": "",
- "includes": {},
- "excludes": {
- "caps": [
- "CAP_AUDIT_WRITE"
- ]
- },
- "errnoRet": 22
- },
- {
- "names": [
- "socket"
- ],
- "action": "SCMP_ACT_ALLOW",
- "args": [
- {
- "index": 2,
- "value": 9,
- "valueTwo": 0,
- "op": "SCMP_CMP_NE"
- }
- ],
- "comment": "",
- "includes": {},
- "excludes": {
- "caps": [
- "CAP_AUDIT_WRITE"
- ]
- }
- },
- {
- "names": [
- "socket"
- ],
- "action": "SCMP_ACT_ALLOW",
- "args": [
- {
- "index": 0,
- "value": 16,
- "valueTwo": 0,
- "op": "SCMP_CMP_NE"
- }
- ],
- "comment": "",
- "includes": {},
- "excludes": {
- "caps": [
- "CAP_AUDIT_WRITE"
- ]
- }
- },
- {
- "names": [
- "socket"
- ],
- "action": "SCMP_ACT_ALLOW",
- "args": [
- {
- "index": 2,
- "value": 9,
- "valueTwo": 0,
- "op": "SCMP_CMP_NE"
- }
- ],
- "comment": "",
- "includes": {},
- "excludes": {
- "caps": [
- "CAP_AUDIT_WRITE"
- ]
- }
- },
- {
- "names": [
- "socket"
- ],
- "action": "SCMP_ACT_ALLOW",
- "args": null,
- "comment": "",
- "includes": {
- "caps": [
- "CAP_AUDIT_WRITE"
- ]
- },
- "excludes": {}
- }
- ]
-} \ No newline at end of file
diff --git a/vendor/github.com/seccomp/containers-golang/seccomp_default_linux.go b/vendor/github.com/seccomp/containers-golang/seccomp_default_linux.go
deleted file mode 100644
index 86c73bf99..000000000
--- a/vendor/github.com/seccomp/containers-golang/seccomp_default_linux.go
+++ /dev/null
@@ -1,744 +0,0 @@
-// +build seccomp
-
-// SPDX-License-Identifier: Apache-2.0
-
-// Copyright 2013-2018 Docker, Inc.
-
-package seccomp // import "github.com/seccomp/containers-golang"
-
-import (
- "syscall"
-
- "golang.org/x/sys/unix"
-)
-
-func arches() []Architecture {
- return []Architecture{
- {
- Arch: ArchX86_64,
- SubArches: []Arch{ArchX86, ArchX32},
- },
- {
- Arch: ArchAARCH64,
- SubArches: []Arch{ArchARM},
- },
- {
- Arch: ArchMIPS64,
- SubArches: []Arch{ArchMIPS, ArchMIPS64N32},
- },
- {
- Arch: ArchMIPS64N32,
- SubArches: []Arch{ArchMIPS, ArchMIPS64},
- },
- {
- Arch: ArchMIPSEL64,
- SubArches: []Arch{ArchMIPSEL, ArchMIPSEL64N32},
- },
- {
- Arch: ArchMIPSEL64N32,
- SubArches: []Arch{ArchMIPSEL, ArchMIPSEL64},
- },
- {
- Arch: ArchS390X,
- SubArches: []Arch{ArchS390},
- },
- }
-}
-
-// DefaultProfile defines the allowlist for the default seccomp profile.
-func DefaultProfile() *Seccomp {
- einval := uint(syscall.EINVAL)
-
- syscalls := []*Syscall{
- {
- Names: []string{
- "_llseek",
- "_newselect",
- "accept",
- "accept4",
- "access",
- "adjtimex",
- "alarm",
- "bind",
- "brk",
- "capget",
- "capset",
- "chdir",
- "chmod",
- "chown",
- "chown32",
- "clock_getres",
- "clock_gettime",
- "clock_nanosleep",
- "close",
- "connect",
- "copy_file_range",
- "creat",
- "dup",
- "dup2",
- "dup3",
- "epoll_create",
- "epoll_create1",
- "epoll_ctl",
- "epoll_ctl_old",
- "epoll_pwait",
- "epoll_wait",
- "epoll_wait_old",
- "eventfd",
- "eventfd2",
- "execve",
- "execveat",
- "exit",
- "exit_group",
- "faccessat",
- "fadvise64",
- "fadvise64_64",
- "fallocate",
- "fanotify_mark",
- "fchdir",
- "fchmod",
- "fchmodat",
- "fchown",
- "fchown32",
- "fchownat",
- "fcntl",
- "fcntl64",
- "fdatasync",
- "fgetxattr",
- "flistxattr",
- "flock",
- "fork",
- "fremovexattr",
- "fsetxattr",
- "fstat",
- "fstat64",
- "fstatat64",
- "fstatfs",
- "fstatfs64",
- "fsync",
- "ftruncate",
- "ftruncate64",
- "futex",
- "futimesat",
- "get_robust_list",
- "get_thread_area",
- "getcpu",
- "getcwd",
- "getdents",
- "getdents64",
- "getegid",
- "getegid32",
- "geteuid",
- "geteuid32",
- "getgid",
- "getgid32",
- "getgroups",
- "getgroups32",
- "getitimer",
- "getpeername",
- "getpgid",
- "getpgrp",
- "getpid",
- "getppid",
- "getpriority",
- "getrandom",
- "getresgid",
- "getresgid32",
- "getresuid",
- "getresuid32",
- "getrlimit",
- "getrusage",
- "getsid",
- "getsockname",
- "getsockopt",
- "gettid",
- "gettimeofday",
- "getuid",
- "getuid32",
- "getxattr",
- "inotify_add_watch",
- "inotify_init",
- "inotify_init1",
- "inotify_rm_watch",
- "io_cancel",
- "io_destroy",
- "io_getevents",
- "io_setup",
- "io_submit",
- "ioctl",
- "ioprio_get",
- "ioprio_set",
- "ipc",
- "kill",
- "lchown",
- "lchown32",
- "lgetxattr",
- "link",
- "linkat",
- "listen",
- "listxattr",
- "llistxattr",
- "lremovexattr",
- "lseek",
- "lsetxattr",
- "lstat",
- "lstat64",
- "madvise",
- "memfd_create",
- "mincore",
- "mkdir",
- "mkdirat",
- "mknod",
- "mknodat",
- "mlock",
- "mlock2",
- "mlockall",
- "mmap",
- "mmap2",
- "mount",
- "mprotect",
- "mq_getsetattr",
- "mq_notify",
- "mq_open",
- "mq_timedreceive",
- "mq_timedsend",
- "mq_unlink",
- "mremap",
- "msgctl",
- "msgget",
- "msgrcv",
- "msgsnd",
- "msync",
- "munlock",
- "munlockall",
- "munmap",
- "name_to_handle_at",
- "nanosleep",
- "newfstatat",
- "open",
- "openat",
- "pause",
- "pipe",
- "pipe2",
- "poll",
- "ppoll",
- "prctl",
- "pread64",
- "preadv",
- "preadv2",
- "prlimit64",
- "pselect6",
- "pwrite64",
- "pwritev",
- "pwritev2",
- "read",
- "readahead",
- "readlink",
- "readlinkat",
- "readv",
- "reboot",
- "recv",
- "recvfrom",
- "recvmmsg",
- "recvmsg",
- "remap_file_pages",
- "removexattr",
- "rename",
- "renameat",
- "renameat2",
- "restart_syscall",
- "rmdir",
- "rt_sigaction",
- "rt_sigpending",
- "rt_sigprocmask",
- "rt_sigqueueinfo",
- "rt_sigreturn",
- "rt_sigsuspend",
- "rt_sigtimedwait",
- "rt_tgsigqueueinfo",
- "sched_get_priority_max",
- "sched_get_priority_min",
- "sched_getaffinity",
- "sched_getattr",
- "sched_getparam",
- "sched_getscheduler",
- "sched_rr_get_interval",
- "sched_setaffinity",
- "sched_setattr",
- "sched_setparam",
- "sched_setscheduler",
- "sched_yield",
- "seccomp",
- "select",
- "semctl",
- "semget",
- "semop",
- "semtimedop",
- "send",
- "sendfile",
- "sendfile64",
- "sendmmsg",
- "sendmsg",
- "sendto",
- "set_robust_list",
- "set_thread_area",
- "set_tid_address",
- "setfsgid",
- "setfsgid32",
- "setfsuid",
- "setfsuid32",
- "setgid",
- "setgid32",
- "setgroups",
- "setgroups32",
- "setitimer",
- "setpgid",
- "setpriority",
- "setregid",
- "setregid32",
- "setresgid",
- "setresgid32",
- "setresuid",
- "setresuid32",
- "setreuid",
- "setreuid32",
- "setrlimit",
- "setsid",
- "setsockopt",
- "setuid",
- "setuid32",
- "setxattr",
- "shmat",
- "shmctl",
- "shmdt",
- "shmget",
- "shutdown",
- "sigaltstack",
- "signalfd",
- "signalfd4",
- "sigreturn",
- "socketcall",
- "socketpair",
- "splice",
- "stat",
- "stat64",
- "statfs",
- "statfs64",
- "statx",
- "symlink",
- "symlinkat",
- "sync",
- "sync_file_range",
- "syncfs",
- "sysinfo",
- "syslog",
- "tee",
- "tgkill",
- "time",
- "timer_create",
- "timer_delete",
- "timer_getoverrun",
- "timer_gettime",
- "timer_settime",
- "timerfd_create",
- "timerfd_gettime",
- "timerfd_settime",
- "times",
- "tkill",
- "truncate",
- "truncate64",
- "ugetrlimit",
- "umask",
- "umount",
- "umount2",
- "uname",
- "unlink",
- "unlinkat",
- "unshare",
- "utime",
- "utimensat",
- "utimes",
- "vfork",
- "vmsplice",
- "wait4",
- "waitid",
- "waitpid",
- "write",
- "writev",
- },
- Action: ActAllow,
- Args: []*Arg{},
- },
- {
- Names: []string{"personality"},
- Action: ActAllow,
- Args: []*Arg{
- {
- Index: 0,
- Value: 0x0,
- Op: OpEqualTo,
- },
- },
- },
- {
- Names: []string{"personality"},
- Action: ActAllow,
- Args: []*Arg{
- {
- Index: 0,
- Value: 0x0008,
- Op: OpEqualTo,
- },
- },
- },
- {
- Names: []string{"personality"},
- Action: ActAllow,
- Args: []*Arg{
- {
- Index: 0,
- Value: 0x20000,
- Op: OpEqualTo,
- },
- },
- },
- {
- Names: []string{"personality"},
- Action: ActAllow,
- Args: []*Arg{
- {
- Index: 0,
- Value: 0x20008,
- Op: OpEqualTo,
- },
- },
- },
- {
- Names: []string{"personality"},
- Action: ActAllow,
- Args: []*Arg{
- {
- Index: 0,
- Value: 0xffffffff,
- Op: OpEqualTo,
- },
- },
- },
- {
- Names: []string{
- "sync_file_range2",
- },
- Action: ActAllow,
- Args: []*Arg{},
- Includes: Filter{
- Arches: []string{"ppc64le"},
- },
- },
- {
- Names: []string{
- "arm_fadvise64_64",
- "arm_sync_file_range",
- "sync_file_range2",
- "breakpoint",
- "cacheflush",
- "set_tls",
- },
- Action: ActAllow,
- Args: []*Arg{},
- Includes: Filter{
- Arches: []string{"arm", "arm64"},
- },
- },
- {
- Names: []string{
- "arch_prctl",
- },
- Action: ActAllow,
- Args: []*Arg{},
- Includes: Filter{
- Arches: []string{"amd64", "x32"},
- },
- },
- {
- Names: []string{
- "modify_ldt",
- },
- Action: ActAllow,
- Args: []*Arg{},
- Includes: Filter{
- Arches: []string{"amd64", "x32", "x86"},
- },
- },
- {
- Names: []string{
- "s390_pci_mmio_read",
- "s390_pci_mmio_write",
- "s390_runtime_instr",
- },
- Action: ActAllow,
- Args: []*Arg{},
- Includes: Filter{
- Arches: []string{"s390", "s390x"},
- },
- },
- {
- Names: []string{
- "open_by_handle_at",
- },
- Action: ActAllow,
- Args: []*Arg{},
- Includes: Filter{
- Caps: []string{"CAP_DAC_READ_SEARCH"},
- },
- },
- {
- Names: []string{
- "bpf",
- "clone",
- "fanotify_init",
- "lookup_dcookie",
- "mount",
- "name_to_handle_at",
- "perf_event_open",
- "quotactl",
- "setdomainname",
- "sethostname",
- "setns",
- "umount",
- "umount2",
- "unshare",
- },
- Action: ActAllow,
- Args: []*Arg{},
- Includes: Filter{
- Caps: []string{"CAP_SYS_ADMIN"},
- },
- },
- {
- Names: []string{
- "clone",
- },
- Action: ActAllow,
- Args: []*Arg{
- {
- Index: 0,
- Value: unix.CLONE_NEWNS | unix.CLONE_NEWUTS | unix.CLONE_NEWIPC | unix.CLONE_NEWUSER | unix.CLONE_NEWPID | unix.CLONE_NEWNET,
- ValueTwo: 0,
- Op: OpMaskedEqual,
- },
- },
- Excludes: Filter{
- Caps: []string{"CAP_SYS_ADMIN"},
- Arches: []string{"s390", "s390x"},
- },
- },
- {
- Names: []string{
- "clone",
- },
- Action: ActAllow,
- Args: []*Arg{
- {
- Index: 1,
- Value: unix.CLONE_NEWNS | unix.CLONE_NEWUTS | unix.CLONE_NEWIPC | unix.CLONE_NEWUSER | unix.CLONE_NEWPID | unix.CLONE_NEWNET,
- ValueTwo: 0,
- Op: OpMaskedEqual,
- },
- },
- Comment: "s390 parameter ordering for clone is different",
- Includes: Filter{
- Arches: []string{"s390", "s390x"},
- },
- Excludes: Filter{
- Caps: []string{"CAP_SYS_ADMIN"},
- },
- },
- {
- Names: []string{
- "reboot",
- },
- Action: ActAllow,
- Args: []*Arg{},
- Includes: Filter{
- Caps: []string{"CAP_SYS_BOOT"},
- },
- },
- {
- Names: []string{
- "chroot",
- },
- Action: ActAllow,
- Args: []*Arg{},
- Includes: Filter{
- Caps: []string{"CAP_SYS_CHROOT"},
- },
- },
- {
- Names: []string{
- "delete_module",
- "init_module",
- "finit_module",
- "query_module",
- },
- Action: ActAllow,
- Args: []*Arg{},
- Includes: Filter{
- Caps: []string{"CAP_SYS_MODULE"},
- },
- },
- {
- Names: []string{
- "get_mempolicy",
- "mbind",
- "name_to_handle_at",
- "set_mempolicy",
- },
- Action: ActAllow,
- Args: []*Arg{},
- Includes: Filter{
- Caps: []string{"CAP_SYS_NICE"},
- },
- },
- {
- Names: []string{
- "acct",
- },
- Action: ActAllow,
- Args: []*Arg{},
- Includes: Filter{
- Caps: []string{"CAP_SYS_PACCT"},
- },
- },
- {
- Names: []string{
- "kcmp",
- "process_vm_readv",
- "process_vm_writev",
- "ptrace",
- },
- Action: ActAllow,
- Args: []*Arg{},
- Includes: Filter{
- Caps: []string{"CAP_SYS_PTRACE"},
- },
- },
- {
- Names: []string{
- "iopl",
- "ioperm",
- },
- Action: ActAllow,
- Args: []*Arg{},
- Includes: Filter{
- Caps: []string{"CAP_SYS_RAWIO"},
- },
- },
- {
- Names: []string{
- "settimeofday",
- "stime",
- "clock_settime",
- },
- Action: ActAllow,
- Args: []*Arg{},
- Includes: Filter{
- Caps: []string{"CAP_SYS_TIME"},
- },
- },
- {
- Names: []string{
- "vhangup",
- },
- Action: ActAllow,
- Args: []*Arg{},
- Includes: Filter{
- Caps: []string{"CAP_SYS_TTY_CONFIG"},
- },
- },
- {
- Names: []string{
- "socket",
- },
- Action: ActErrno,
- ErrnoRet: &einval,
- Args: []*Arg{
- {
- Index: 0,
- Value: syscall.AF_NETLINK,
- Op: OpEqualTo,
- },
- {
- Index: 2,
- Value: syscall.NETLINK_AUDIT,
- Op: OpEqualTo,
- },
- },
- Excludes: Filter{
- Caps: []string{"CAP_AUDIT_WRITE"},
- },
- },
- {
- Names: []string{
- "socket",
- },
- Action: ActAllow,
- Args: []*Arg{
- {
- Index: 2,
- Value: syscall.NETLINK_AUDIT,
- Op: OpNotEqual,
- },
- },
- Excludes: Filter{
- Caps: []string{"CAP_AUDIT_WRITE"},
- },
- },
- {
- Names: []string{
- "socket",
- },
- Action: ActAllow,
- Args: []*Arg{
- {
- Index: 0,
- Value: syscall.AF_NETLINK,
- Op: OpNotEqual,
- },
- },
- Excludes: Filter{
- Caps: []string{"CAP_AUDIT_WRITE"},
- },
- },
- {
- Names: []string{
- "socket",
- },
- Action: ActAllow,
- Args: []*Arg{
- {
- Index: 2,
- Value: syscall.NETLINK_AUDIT,
- Op: OpNotEqual,
- },
- },
- Excludes: Filter{
- Caps: []string{"CAP_AUDIT_WRITE"},
- },
- },
- {
- Names: []string{
- "socket",
- },
- Action: ActAllow,
- Includes: Filter{
- Caps: []string{"CAP_AUDIT_WRITE"},
- },
- },
- }
-
- return &Seccomp{
- DefaultAction: ActErrno,
- ArchMap: arches(),
- Syscalls: syscalls,
- }
-}
diff --git a/vendor/github.com/seccomp/containers-golang/seccomp_linux.go b/vendor/github.com/seccomp/containers-golang/seccomp_linux.go
deleted file mode 100644
index 44dcd90b8..000000000
--- a/vendor/github.com/seccomp/containers-golang/seccomp_linux.go
+++ /dev/null
@@ -1,191 +0,0 @@
-// +build seccomp
-
-// SPDX-License-Identifier: Apache-2.0
-
-// Copyright 2013-2018 Docker, Inc.
-
-package seccomp // import "github.com/seccomp/containers-golang"
-
-import (
- "encoding/json"
- "errors"
- "fmt"
-
- "github.com/opencontainers/runtime-spec/specs-go"
- libseccomp "github.com/seccomp/libseccomp-golang"
- "golang.org/x/sys/unix"
-)
-
-//go:generate go run -tags 'seccomp' generate.go
-
-// GetDefaultProfile returns the default seccomp profile.
-func GetDefaultProfile(rs *specs.Spec) (*specs.LinuxSeccomp, error) {
- return setupSeccomp(DefaultProfile(), rs)
-}
-
-// LoadProfile takes a json string and decodes the seccomp profile.
-func LoadProfile(body string, rs *specs.Spec) (*specs.LinuxSeccomp, error) {
- var config Seccomp
- if err := json.Unmarshal([]byte(body), &config); err != nil {
- return nil, fmt.Errorf("decoding seccomp profile failed: %v", err)
- }
- return setupSeccomp(&config, rs)
-}
-
-// LoadProfileFromBytes takes a byte slice and decodes the seccomp profile.
-func LoadProfileFromBytes(body []byte, rs *specs.Spec) (*specs.LinuxSeccomp, error) {
- config := &Seccomp{}
- if err := json.Unmarshal(body, config); err != nil {
- return nil, fmt.Errorf("decoding seccomp profile failed: %v", err)
- }
- return setupSeccomp(config, rs)
-}
-
-// LoadProfileFromConfig takes a Seccomp struct and a spec to retrieve a LinuxSeccomp
-func LoadProfileFromConfig(config *Seccomp, specgen *specs.Spec) (*specs.LinuxSeccomp, error) {
- return setupSeccomp(config, specgen)
-}
-
-var nativeToSeccomp = map[string]Arch{
- "amd64": ArchX86_64,
- "arm64": ArchAARCH64,
- "mips64": ArchMIPS64,
- "mips64n32": ArchMIPS64N32,
- "mipsel64": ArchMIPSEL64,
- "mipsel64n32": ArchMIPSEL64N32,
- "s390x": ArchS390X,
-}
-
-// inSlice tests whether a string is contained in a slice of strings or not.
-// Comparison is case sensitive
-func inSlice(slice []string, s string) bool {
- for _, ss := range slice {
- if s == ss {
- return true
- }
- }
- return false
-}
-
-func setupSeccomp(config *Seccomp, rs *specs.Spec) (*specs.LinuxSeccomp, error) {
- if config == nil {
- return nil, nil
- }
-
- // No default action specified, no syscalls listed, assume seccomp disabled
- if config.DefaultAction == "" && len(config.Syscalls) == 0 {
- return nil, nil
- }
-
- newConfig := &specs.LinuxSeccomp{}
-
- var arch string
- var native, err = libseccomp.GetNativeArch()
- if err == nil {
- arch = native.String()
- }
-
- if len(config.Architectures) != 0 && len(config.ArchMap) != 0 {
- return nil, errors.New("'architectures' and 'archMap' were specified in the seccomp profile, use either 'architectures' or 'archMap'")
- }
-
- // if config.Architectures == 0 then libseccomp will figure out the architecture to use
- if len(config.Architectures) != 0 {
- for _, a := range config.Architectures {
- newConfig.Architectures = append(newConfig.Architectures, specs.Arch(a))
- }
- }
-
- if len(config.ArchMap) != 0 {
- for _, a := range config.ArchMap {
- seccompArch, ok := nativeToSeccomp[arch]
- if ok {
- if a.Arch == seccompArch {
- newConfig.Architectures = append(newConfig.Architectures, specs.Arch(a.Arch))
- for _, sa := range a.SubArches {
- newConfig.Architectures = append(newConfig.Architectures, specs.Arch(sa))
- }
- break
- }
- }
- }
- }
-
- newConfig.DefaultAction = specs.LinuxSeccompAction(config.DefaultAction)
-
-Loop:
- // Loop through all syscall blocks and convert them to libcontainer format after filtering them
- for _, call := range config.Syscalls {
- if len(call.Excludes.Arches) > 0 {
- if inSlice(call.Excludes.Arches, arch) {
- continue Loop
- }
- }
- if len(call.Excludes.Caps) > 0 {
- for _, c := range call.Excludes.Caps {
- if inSlice(rs.Process.Capabilities.Bounding, c) {
- continue Loop
- }
- }
- }
- if len(call.Includes.Arches) > 0 {
- if !inSlice(call.Includes.Arches, arch) {
- continue Loop
- }
- }
- if len(call.Includes.Caps) > 0 {
- for _, c := range call.Includes.Caps {
- if !inSlice(rs.Process.Capabilities.Bounding, c) {
- continue Loop
- }
- }
- }
-
- if call.Name != "" && len(call.Names) != 0 {
- return nil, errors.New("'name' and 'names' were specified in the seccomp profile, use either 'name' or 'names'")
- }
-
- if call.Name != "" {
- newConfig.Syscalls = append(newConfig.Syscalls, createSpecsSyscall([]string{call.Name}, call.Action, call.Args, call.ErrnoRet))
- }
-
- if len(call.Names) > 0 {
- newConfig.Syscalls = append(newConfig.Syscalls, createSpecsSyscall(call.Names, call.Action, call.Args, call.ErrnoRet))
- }
- }
-
- return newConfig, nil
-}
-
-func createSpecsSyscall(names []string, action Action, args []*Arg, errnoRet *uint) specs.LinuxSyscall {
- newCall := specs.LinuxSyscall{
- Names: names,
- Action: specs.LinuxSeccompAction(action),
- ErrnoRet: errnoRet,
- }
-
- // Loop through all the arguments of the syscall and convert them
- for _, arg := range args {
- newArg := specs.LinuxSeccompArg{
- Index: arg.Index,
- Value: arg.Value,
- ValueTwo: arg.ValueTwo,
- Op: specs.LinuxSeccompOperator(arg.Op),
- }
-
- newCall.Args = append(newCall.Args, newArg)
- }
- return newCall
-}
-
-// IsEnabled returns true if seccomp is enabled for the host.
-func IsEnabled() bool {
- // Check if Seccomp is supported, via CONFIG_SECCOMP.
- if err := unix.Prctl(unix.PR_GET_SECCOMP, 0, 0, 0, 0); err != unix.EINVAL {
- // Make sure the kernel has CONFIG_SECCOMP_FILTER.
- if err := unix.Prctl(unix.PR_SET_SECCOMP, unix.SECCOMP_MODE_FILTER, 0, 0, 0); err != unix.EINVAL {
- return true
- }
- }
- return false
-}
diff --git a/vendor/github.com/seccomp/containers-golang/seccomp_unsupported.go b/vendor/github.com/seccomp/containers-golang/seccomp_unsupported.go
deleted file mode 100644
index 763f22982..000000000
--- a/vendor/github.com/seccomp/containers-golang/seccomp_unsupported.go
+++ /dev/null
@@ -1,45 +0,0 @@
-// +build !seccomp
-
-// SPDX-License-Identifier: Apache-2.0
-
-// Copyright 2013-2018 Docker, Inc.
-
-package seccomp // import "github.com/seccomp/containers-golang"
-
-import (
- "errors"
-
- "github.com/opencontainers/runtime-spec/specs-go"
-)
-
-var errNotSupported = errors.New("seccomp not enabled in this build")
-
-// DefaultProfile returns a nil pointer on unsupported systems.
-func DefaultProfile() *Seccomp {
- return nil
-}
-
-// LoadProfile returns an error on unsuppored systems
-func LoadProfile(body string, rs *specs.Spec) (*specs.LinuxSeccomp, error) {
- return nil, errNotSupported
-}
-
-// GetDefaultProfile returns an error on unsuppored systems
-func GetDefaultProfile(rs *specs.Spec) (*specs.LinuxSeccomp, error) {
- return nil, errNotSupported
-}
-
-// LoadProfileFromBytes takes a byte slice and decodes the seccomp profile.
-func LoadProfileFromBytes(body []byte, rs *specs.Spec) (*specs.LinuxSeccomp, error) {
- return nil, errNotSupported
-}
-
-// LoadProfileFromConfig takes a Seccomp struct and a spec to retrieve a LinuxSeccomp
-func LoadProfileFromConfig(config *Seccomp, specgen *specs.Spec) (*specs.LinuxSeccomp, error) {
- return nil, errNotSupported
-}
-
-// IsEnabled returns true if seccomp is enabled for the host.
-func IsEnabled() bool {
- return false
-}
diff --git a/vendor/github.com/seccomp/containers-golang/types.go b/vendor/github.com/seccomp/containers-golang/types.go
deleted file mode 100644
index 6651c423f..000000000
--- a/vendor/github.com/seccomp/containers-golang/types.go
+++ /dev/null
@@ -1,98 +0,0 @@
-package seccomp // import "github.com/seccomp/containers-golang"
-
-// SPDX-License-Identifier: Apache-2.0
-
-// Copyright 2013-2018 Docker, Inc.
-
-// Seccomp represents the config for a seccomp profile for syscall restriction.
-type Seccomp struct {
- DefaultAction Action `json:"defaultAction"`
- // Architectures is kept to maintain backward compatibility with the old
- // seccomp profile.
- Architectures []Arch `json:"architectures,omitempty"`
- ArchMap []Architecture `json:"archMap,omitempty"`
- Syscalls []*Syscall `json:"syscalls"`
-}
-
-// Architecture is used to represent a specific architecture
-// and its sub-architectures
-type Architecture struct {
- Arch Arch `json:"architecture"`
- SubArches []Arch `json:"subArchitectures"`
-}
-
-// Arch used for architectures
-type Arch string
-
-// Additional architectures permitted to be used for system calls
-// By default only the native architecture of the kernel is permitted
-const (
- ArchX86 Arch = "SCMP_ARCH_X86"
- ArchX86_64 Arch = "SCMP_ARCH_X86_64"
- ArchX32 Arch = "SCMP_ARCH_X32"
- ArchARM Arch = "SCMP_ARCH_ARM"
- ArchAARCH64 Arch = "SCMP_ARCH_AARCH64"
- ArchMIPS Arch = "SCMP_ARCH_MIPS"
- ArchMIPS64 Arch = "SCMP_ARCH_MIPS64"
- ArchMIPS64N32 Arch = "SCMP_ARCH_MIPS64N32"
- ArchMIPSEL Arch = "SCMP_ARCH_MIPSEL"
- ArchMIPSEL64 Arch = "SCMP_ARCH_MIPSEL64"
- ArchMIPSEL64N32 Arch = "SCMP_ARCH_MIPSEL64N32"
- ArchPPC Arch = "SCMP_ARCH_PPC"
- ArchPPC64 Arch = "SCMP_ARCH_PPC64"
- ArchPPC64LE Arch = "SCMP_ARCH_PPC64LE"
- ArchS390 Arch = "SCMP_ARCH_S390"
- ArchS390X Arch = "SCMP_ARCH_S390X"
-)
-
-// Action taken upon Seccomp rule match
-type Action string
-
-// Define actions for Seccomp rules
-const (
- ActKill Action = "SCMP_ACT_KILL"
- ActTrap Action = "SCMP_ACT_TRAP"
- ActErrno Action = "SCMP_ACT_ERRNO"
- ActTrace Action = "SCMP_ACT_TRACE"
- ActAllow Action = "SCMP_ACT_ALLOW"
-)
-
-// Operator used to match syscall arguments in Seccomp
-type Operator string
-
-// Define operators for syscall arguments in Seccomp
-const (
- OpNotEqual Operator = "SCMP_CMP_NE"
- OpLessThan Operator = "SCMP_CMP_LT"
- OpLessEqual Operator = "SCMP_CMP_LE"
- OpEqualTo Operator = "SCMP_CMP_EQ"
- OpGreaterEqual Operator = "SCMP_CMP_GE"
- OpGreaterThan Operator = "SCMP_CMP_GT"
- OpMaskedEqual Operator = "SCMP_CMP_MASKED_EQ"
-)
-
-// Arg used for matching specific syscall arguments in Seccomp
-type Arg struct {
- Index uint `json:"index"`
- Value uint64 `json:"value"`
- ValueTwo uint64 `json:"valueTwo"`
- Op Operator `json:"op"`
-}
-
-// Filter is used to conditionally apply Seccomp rules
-type Filter struct {
- Caps []string `json:"caps,omitempty"`
- Arches []string `json:"arches,omitempty"`
-}
-
-// Syscall is used to match a group of syscalls in Seccomp
-type Syscall struct {
- Name string `json:"name,omitempty"`
- Names []string `json:"names,omitempty"`
- Action Action `json:"action"`
- Args []*Arg `json:"args"`
- Comment string `json:"comment"`
- Includes Filter `json:"includes"`
- Excludes Filter `json:"excludes"`
- ErrnoRet *uint `json:"errnoRet,omitempty"`
-}
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 7af0f1110..ffd90f5a5 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -66,10 +66,11 @@ github.com/containernetworking/plugins/pkg/utils/hwaddr
github.com/containernetworking/plugins/pkg/utils/sysctl
github.com/containernetworking/plugins/plugins/ipam/host-local/backend
github.com/containernetworking/plugins/plugins/ipam/host-local/backend/allocator
-# github.com/containers/buildah v1.15.1-0.20200813183340-0a8dc1f8064c
+# github.com/containers/buildah v1.16.1
github.com/containers/buildah
github.com/containers/buildah/bind
github.com/containers/buildah/chroot
+github.com/containers/buildah/copier
github.com/containers/buildah/docker
github.com/containers/buildah/imagebuildah
github.com/containers/buildah/manifests
@@ -80,11 +81,12 @@ github.com/containers/buildah/pkg/formats
github.com/containers/buildah/pkg/manifests
github.com/containers/buildah/pkg/overlay
github.com/containers/buildah/pkg/parse
+github.com/containers/buildah/pkg/rusage
github.com/containers/buildah/pkg/secrets
github.com/containers/buildah/pkg/supplemented
github.com/containers/buildah/pkg/umask
github.com/containers/buildah/util
-# github.com/containers/common v0.21.0
+# github.com/containers/common v0.22.0
github.com/containers/common/pkg/apparmor
github.com/containers/common/pkg/apparmor/internal/supported
github.com/containers/common/pkg/auth
@@ -486,8 +488,6 @@ github.com/rootless-containers/rootlesskit/pkg/port/builtin/parent/udp/udpproxy
github.com/rootless-containers/rootlesskit/pkg/port/portutil
# github.com/safchain/ethtool v0.0.0-20190326074333-42ed695e3de8
github.com/safchain/ethtool
-# github.com/seccomp/containers-golang v0.6.0
-github.com/seccomp/containers-golang
# github.com/seccomp/libseccomp-golang v0.9.2-0.20200616122406-847368b35ebf
github.com/seccomp/libseccomp-golang
# github.com/sirupsen/logrus v1.6.0