138 files changed, 1576 insertions, 1135 deletions
diff --git a/.cirrus.yml b/.cirrus.yml index 1b824909c..eda03bf23 100644 --- a/.cirrus.yml +++ b/.cirrus.yml @@ -329,7 +329,7 @@ static_alt_build_task: - build # Community-maintained task, may fail on occasion. If so, uncomment # the next line and file an issue with details about the failure. - allow_failures: $CI == $CI + # allow_failures: $CI == $CI gce_instance: *bigvm env: <<: *stdenvars @@ -338,14 +338,13 @@ static_alt_build_task: ALT_NAME: 'Static build' # Do not use 'latest', fixed-version tag for runtime stability. CTR_FQIN: "docker.io/nixos/nix:2.3.6" - # This is critical, it helps to avoid a very lengthy process of - # statically building every dependency needed to build podman. - # Assuming the dependency and build description hasn't changed, - # this cache ensures only the static podman binary is built. - nix_cache: - folder: '/var/cache/nix' - # Cirrus will calculate/use sha of this output as the cache key - fingerprint_script: echo "${IMAGE_SUFFIX}" && cat nix/* + # Authentication token for pushing the build cache to cachix. + # This is critical, it helps to avoid a very lengthy process of + # statically building every dependency needed to build podman. + # Assuming the pinned nix dependencies in nix/nixpkgs.json have not + # changed, this cache will ensure that only the static podman binary is + # built. + CACHIX_AUTH_TOKEN: ENCRYPTED[df0d4d0a67474e8ea49cc503221dcb912b7e2ba45c8ec4bf2e5fd9c49a18ac21c24bacee59b5393355ed9e4358d2baef] setup_script: *setup main_script: *main always: *binary_artifacts diff --git a/build_osx.md b/build_osx.md new file mode 100644 index 000000000..59e1797a6 --- /dev/null +++ b/build_osx.md @@ -0,0 +1,55 @@ +# Building the Podman client on macOS + +The following describes the process for building the Podman client on macOS. + +## Install brew +Podman requires brew -- a package manager for macOS. It is used to install the additional packages that are +needed by Podman. See the [brew project page](https://brew.sh/) for installation instructions. + +## Install build dependencies +Building Podman requires some packages from brew. These can be installed using brew from a macOS terminal: + +``` +$ brew install go go-md2man +``` + +## Obtain Podman source code + +You can obtain the latest source code for Podman from its GitHub repository. + +``` +$ git clone https://github.com/containers/podman go/src/github.com/containers/podman +``` + +## Build client +With the Podman source code obtained and its dependencies installed, the client +can now be built. + +``` +$ cd go/src/github.com/containers/podman +$ make podman-remote-darwin +$ mv bin/podman-remote-darwin bin/podman +``` + +The binary will be located in bin/: +``` +$ ls -l bin/ +``` + +If you would like to build the docs associated with Podman on macOS: +``` +$ make install-podman-remote-darwin-docs +$ ls docs/build/remote/darwin +``` + +To install and view these manpages: + +``` +$ cp -a docs/build/remote/darwin/* /usr/share/man/man1 +$ man podman +``` + +## Using the client + +To learn how to use the Podman client, refer to its +[tutorial](https://github.com/containers/podman/blob/master/docs/tutorials/remote_client.md).
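As a usage sketch to complement the new macOS document: once built, the remote client needs a connection to a Linux host running the Podman service. The connection name, user, host name, and socket path below are illustrative, not part of the patch:

```
$ podman system connection add linux-box ssh://core@host.example.com/run/user/1000/podman/podman.sock
$ podman info
```

`podman info` reporting the remote host's details confirms the connection works; see the linked remote-client tutorial for setting up the server side.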
diff --git a/cmd/podman/common/create_opts.go b/cmd/podman/common/create_opts.go index a4da8da9e..d86a6d364 100644 --- a/cmd/podman/common/create_opts.go +++ b/cmd/podman/common/create_opts.go @@ -3,6 +3,7 @@ package common import ( "fmt" "net" + "path/filepath" "strconv" "strings" @@ -383,8 +384,29 @@ func ContainerCreateToContainerCLIOpts(cc handlers.CreateContainerConfig, cgroup } // volumes - if volumes := cc.HostConfig.Binds; len(volumes) > 0 { - cliOpts.Volume = volumes + volDestinations := make(map[string]bool) + for _, vol := range cc.HostConfig.Binds { + cliOpts.Volume = append(cliOpts.Volume, vol) + // Extract the destination so we don't add duplicate mounts in + // the volumes phase. + splitVol := strings.SplitN(vol, ":", 3) + switch len(splitVol) { + case 1: + volDestinations[vol] = true + default: + volDestinations[splitVol[1]] = true + } + } + // Anonymous volumes are added differently from other volumes, in their + // own special field, for reasons known only to Docker. Still use the + // format of `-v` so we can just append them in there. + // Unfortunately, these may be duplicates of existing mounts in Binds. + // So... We need to catch that. + for vol := range cc.Volumes { + if _, ok := volDestinations[filepath.Clean(vol)]; ok { + continue + } + cliOpts.Volume = append(cliOpts.Volume, vol) } if len(cc.HostConfig.BlkioWeightDevice) > 0 { devices := make([]string, 0, len(cc.HostConfig.BlkioWeightDevice)) diff --git a/cmd/podman/containers/ps.go b/cmd/podman/containers/ps.go index d23771fc5..31f44d92f 100644 --- a/cmd/podman/containers/ps.go +++ b/cmd/podman/containers/ps.go @@ -78,7 +78,7 @@ func listFlagSet(cmd *cobra.Command) { flags := cmd.Flags() flags.BoolVarP(&listOpts.All, "all", "a", false, "Show all the containers, default is only running containers") - flags.BoolVar(&listOpts.Storage, "external", false, "Show containers in storage not controlled by Podman") + flags.BoolVar(&listOpts.External, "external", false, "Show containers in storage not controlled by Podman") filterFlagName := "filter" flags.StringSliceVarP(&filters, filterFlagName, "f", []string{}, "Filter output based on conditions given") @@ -132,10 +132,10 @@ func checkFlags(c *cobra.Command) error { } cfg := registry.PodmanConfig() if cfg.Engine.Namespace != "" { - if c.Flag("storage").Changed && listOpts.Storage { - return errors.New("--namespace and --storage flags can not both be set") + if c.Flag("storage").Changed && listOpts.External { + return errors.New("--namespace and --external flags can not both be set") } - listOpts.Storage = false + listOpts.External = false } return nil diff --git a/cmd/podman/containers/rm.go b/cmd/podman/containers/rm.go index ea616b6e5..884ad05f4 100644 --- a/cmd/podman/containers/rm.go +++ b/cmd/podman/containers/rm.go @@ -140,6 +140,10 @@ func removeContainers(namesOrIDs []string, rmOptions entities.RmOptions, setExit } func setExitCode(err error) { + // If error is set to no such container, do not reset + if registry.GetExitCode() == 1 { + return + } cause := errors.Cause(err) switch { case cause == define.ErrNoSuchCtr: diff --git a/cmd/podman/images/build.go b/cmd/podman/images/build.go index 4219e325b..1f06dace9 100644 --- a/cmd/podman/images/build.go +++ b/cmd/podman/images/build.go @@ -106,7 +106,9 @@ func buildFlags(cmd *cobra.Command) { logrus.Errorf("unable to set --pull to true: %v", err) } flag.DefValue = "true" + flag.Usage = "Always attempt to pull the image (errors are fatal)" flags.AddFlagSet(&budFlags) + // Add the completion functions budCompletions 
:= buildahCLI.GetBudFlagsCompletions() completion.CompleteCommandFlags(cmd, budCompletions) diff --git a/cmd/podman/images/history.go b/cmd/podman/images/history.go index 964c7a975..af40dd73a 100644 --- a/cmd/podman/images/history.go +++ b/cmd/podman/images/history.go @@ -162,7 +162,7 @@ func (h historyReporter) Size() string { } func (h historyReporter) CreatedBy() string { - if len(h.ImageHistoryLayer.CreatedBy) > 45 { + if !opts.noTrunc && len(h.ImageHistoryLayer.CreatedBy) > 45 { return h.ImageHistoryLayer.CreatedBy[:45-3] + "..." } return h.ImageHistoryLayer.CreatedBy diff --git a/cmd/podman/images/push.go b/cmd/podman/images/push.go index d53a9c066..eccf93e57 100644 --- a/cmd/podman/images/push.go +++ b/cmd/podman/images/push.go @@ -98,7 +98,7 @@ func pushFlags(cmd *cobra.Command) { _ = cmd.RegisterFlagCompletionFunc(digestfileFlagName, completion.AutocompleteDefault) formatFlagName := "format" - flags.StringVarP(&pushOptions.Format, formatFlagName, "f", "", "Manifest type (oci, v2s1, or v2s2) to use when pushing an image using the 'dir' transport (default is manifest type of source)") + flags.StringVarP(&pushOptions.Format, formatFlagName, "f", "", "Manifest type (oci, v2s2, or v2s1) to use when pushing an image using the 'dir' transport (default is manifest type of source)") _ = cmd.RegisterFlagCompletionFunc(formatFlagName, common.AutocompleteManifestFormat) flags.BoolVarP(&pushOptions.Quiet, "quiet", "q", false, "Suppress output information when pushing images") @@ -114,7 +114,10 @@ func pushFlags(cmd *cobra.Command) { if registry.IsRemote() { _ = flags.MarkHidden("cert-dir") _ = flags.MarkHidden("compress") + _ = flags.MarkHidden("digestfile") _ = flags.MarkHidden("quiet") + _ = flags.MarkHidden("remove-signatures") + _ = flags.MarkHidden("sign-by") } _ = flags.MarkHidden("signature-policy") } diff --git a/contrib/cirrus/pr-should-include-tests b/contrib/cirrus/pr-should-include-tests index caf27cf83..a3b4847a7 100755 --- a/contrib/cirrus/pr-should-include-tests +++ b/contrib/cirrus/pr-should-include-tests @@ -39,6 +39,7 @@ filtered_changes=$(git diff --name-status $base $head | egrep -v '^contrib/' | egrep -v '^docs/' | egrep -v '^hack/' | + egrep -v '^nix/' | egrep -v '^vendor/' | egrep -v '^version/') if [[ -z "$filtered_changes" ]]; then diff --git a/contrib/cirrus/required_host_ports.txt b/contrib/cirrus/required_host_ports.txt index 9248e497a..5f066e059 100644 --- a/contrib/cirrus/required_host_ports.txt +++ b/contrib/cirrus/required_host_ports.txt @@ -2,3 +2,4 @@ github.com 22 docker.io 443 quay.io 443 registry.fedoraproject.org 443 +podman.cachix.org 443 diff --git a/contrib/cirrus/runner.sh b/contrib/cirrus/runner.sh index 50d615105..ccbdb63b6 100755 --- a/contrib/cirrus/runner.sh +++ b/contrib/cirrus/runner.sh @@ -228,15 +228,14 @@ function _run_altbuild() { req_env_vars CTR_FQIN [[ "$UID" -eq 0 ]] || \ die "Static build must execute nixos container as root on host" - mkdir -p /var/cache/nix - podman run -i --rm -v /var/cache/nix:/mnt/nix:Z \ - $CTR_FQIN cp -rfT /nix /mnt/nix - podman run -i --rm -v /var/cache/nix:/nix:Z \ - -v $PWD:$PWD:Z -w $PWD $CTR_FQIN \ - nix --print-build-logs --option cores 4 --option max-jobs 4 \ - build --file ./nix/ - # result symlink is absolute from container perspective :( - cp /var/cache/$(readlink result)/bin/podman ./ # for cirrus-ci artifact + podman run -i --rm \ + -e CACHIX_AUTH_TOKEN \ + -v $PWD:$PWD:Z -w $PWD $CTR_FQIN sh -c \ + "nix-env -iA cachix -f https://cachix.org/api/v1/install && \ + cachix use podman && \ + nix-build nix 
&& \ + nix-store -qR --include-outputs \$(nix-instantiate nix/default.nix) | grep -v podman | cachix push podman && \ + cp -R result/bin ." rm result # makes cirrus puke ;; *) diff --git a/contrib/rootless-cni-infra/Containerfile b/contrib/rootless-cni-infra/Containerfile index 871e06a6c..4324f39d2 100644 --- a/contrib/rootless-cni-infra/Containerfile +++ b/contrib/rootless-cni-infra/Containerfile @@ -2,7 +2,7 @@ ARG GOLANG_VERSION=1.15 ARG ALPINE_VERSION=3.12 ARG CNI_VERSION=v0.8.0 ARG CNI_PLUGINS_VERSION=v0.8.7 -ARG DNSNAME_VERSION=v1.0.0 +ARG DNSNAME_VERSION=v1.1.1 FROM golang:${GOLANG_VERSION}-alpine${ALPINE_VERSION} AS golang-base RUN apk add --no-cache git @@ -33,4 +33,4 @@ COPY rootless-cni-infra /usr/local/bin ENV CNI_PATH=/opt/cni/bin CMD ["sleep", "infinity"] -ENV ROOTLESS_CNI_INFRA_VERSION=3 +ENV ROOTLESS_CNI_INFRA_VERSION=5 diff --git a/contrib/rootless-cni-infra/rootless-cni-infra b/contrib/rootless-cni-infra/rootless-cni-infra index 463254c7f..cceb8d817 100755 --- a/contrib/rootless-cni-infra/rootless-cni-infra +++ b/contrib/rootless-cni-infra/rootless-cni-infra @@ -21,16 +21,19 @@ wait_unshare_net() { done } -# CLI subcommand: "alloc $CONTAINER_ID $NETWORK_NAME $POD_NAME" +# CLI subcommand: "alloc $CONTAINER_ID $NETWORK_NAME $POD_NAME $IP $MAC $CAP_ARGS" cmd_entrypoint_alloc() { - if [ "$#" -ne 3 ]; then - echo >&2 "Usage: $ARG0 alloc CONTAINER_ID NETWORK_NAME POD_NAME" + if [ "$#" -ne 6 ]; then + echo >&2 "Usage: $ARG0 alloc CONTAINER_ID NETWORK_NAME POD_NAME IP MAC CAP_ARGS" exit 1 fi ID="$1" NET="$2" K8S_POD_NAME="$3" + IP="$4" + MAC="$5" + CAP_ARGS="$6" dir="${BASE}/${ID}" mkdir -p "${dir}/attached" "${dir}/attached-args" @@ -46,9 +49,18 @@ cmd_entrypoint_alloc() { nsenter -t "${pid}" -n ip link set lo up fi CNI_ARGS="IgnoreUnknown=1;K8S_POD_NAME=${K8S_POD_NAME}" + if [ "$IP" ]; then + CNI_ARGS="$CNI_ARGS;IP=${IP}" + fi + if [ "$MAC" ]; then + CNI_ARGS="$CNI_ARGS;MAC=${MAC}" + fi + if [ "$CAP_ARGS" ]; then + CAP_ARGS="$CAP_ARGS" + fi nwcount=$(find "${dir}/attached" -type f | wc -l) CNI_IFNAME="eth${nwcount}" - export CNI_ARGS CNI_IFNAME + export CNI_ARGS CNI_IFNAME CAP_ARGS cnitool add "${NET}" "/proc/${pid}/ns/net" >"${dir}/attached/${NET}" echo "${CNI_ARGS}" >"${dir}/attached-args/${NET}" diff --git a/docs/source/markdown/podman-build.1.md b/docs/source/markdown/podman-build.1.md index 61c05fdef..e05678e2c 100644 --- a/docs/source/markdown/podman-build.1.md +++ b/docs/source/markdown/podman-build.1.md @@ -455,9 +455,8 @@ not required for Buildah as it supports only Linux. #### **--pull** -When the option is specified or set to "true", pull the image from the first -registry it is found in as listed in registries.conf. Raise an error if not -found in the registries, even if the image is present locally. +When the option is specified or set to "true", pull the image. Raise an error +if the image could not be pulled, even if the image is present locally. If the option is disabled (with *--pull=false*) or not specified, pull the image from the registry only if the image is not present locally. Raise an diff --git a/docs/source/markdown/podman-push.1.md b/docs/source/markdown/podman-push.1.md index f7624ed5f..3ed5f60c0 100644 --- a/docs/source/markdown/podman-push.1.md +++ b/docs/source/markdown/podman-push.1.md @@ -90,8 +90,7 @@ solely for scripting compatibility. 
#### **--format**, **-f**=*format* -Manifest Type (oci, v2s1, or v2s2) to use when pushing an image to a directory using the 'dir:' transport (default is manifest type of source) -Note: This flag can only be set when using the **dir** transport +Manifest Type (oci, v2s2, or v2s1) to use when pushing an image. #### **--quiet**, **-q** @@ -99,11 +98,11 @@ When writing the output image, suppress progress output #### **--remove-signatures** -Discard any pre-existing signatures in the image +Discard any pre-existing signatures in the image. (Not available for remote commands) #### **--sign-by**=*key* -Add a signature at the destination using the specified key +Add a signature at the destination using the specified key. (Not available for remote commands) #### **--tls-verify**=*true|false* @@ -10,10 +10,10 @@ require ( github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd // indirect github.com/containernetworking/cni v0.8.0 github.com/containernetworking/plugins v0.9.0 - github.com/containers/buildah v1.19.2 + github.com/containers/buildah v1.19.3 github.com/containers/common v0.33.1 github.com/containers/conmon v2.0.20+incompatible - github.com/containers/image/v5 v5.9.0 + github.com/containers/image/v5 v5.10.1 github.com/containers/psgo v1.5.2 github.com/containers/storage v1.24.5 github.com/coreos/go-systemd/v22 v22.1.0 @@ -95,14 +95,16 @@ github.com/containernetworking/cni v0.8.0/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ github.com/containernetworking/plugins v0.8.7/go.mod h1:R7lXeZaBzpfqapcAbHRW8/CYwm0dHzbz0XEjofx0uB0= github.com/containernetworking/plugins v0.9.0 h1:c+1gegKhR7+d0Caum9pEHugZlyhXPOG6v3V6xJgIGCI= github.com/containernetworking/plugins v0.9.0/go.mod h1:dbWv4dI0QrBGuVgj+TuVQ6wJRZVOhrCQj91YyC92sxg= -github.com/containers/buildah v1.19.2 h1:1/ePUtinuqTPSwXiZXPyBJmik688l1e4SUZsoOv716w= -github.com/containers/buildah v1.19.2/go.mod h1:zUMKdtZu4rs6lgKHheKwo+wBlh5ZL+1+/5/IsaNTD74= +github.com/containers/buildah v1.19.3 h1:U0E1UKzqW5C11W7giHhLZI06xkZiV40ZKDK/c1jotbE= +github.com/containers/buildah v1.19.3/go.mod h1:uZb6GuE36tmRSOcIXGfiYqdpr+GPXWmlUIJSk5sn19w= github.com/containers/common v0.33.1 h1:XpDiq8Cta8+u1s4kpYSEWdB140ZmqgyIXfWkLqKx3z0= github.com/containers/common v0.33.1/go.mod h1:mjDo/NKeweL/onaspLhZ38WnHXaYmrELHclIdvSnYpY= github.com/containers/conmon v2.0.20+incompatible h1:YbCVSFSCqFjjVwHTPINGdMX1F6JXHGTUje2ZYobNrkg= github.com/containers/conmon v2.0.20+incompatible/go.mod h1:hgwZ2mtuDrppv78a/cOBNiCm6O0UMWGx1mu7P00nu5I= github.com/containers/image/v5 v5.9.0 h1:dRmUtcluQcmasNo3DpnRoZjfU0rOu1qZeL6wlDJr10Q= github.com/containers/image/v5 v5.9.0/go.mod h1:blOEFd/iFdeyh891ByhCVUc+xAcaI3gBegXECwz9UbQ= +github.com/containers/image/v5 v5.10.1 h1:tHhGQ8RCMxJfJLD/PEW1qrOKX8nndledW9qz6UiAxns= +github.com/containers/image/v5 v5.10.1/go.mod h1:JlRLJZv7elVbtHaaaR6Kz8i6G3k2ttj4t7fubwxD9Hs= github.com/containers/libtrust v0.0.0-20190913040956-14b96171aa3b h1:Q8ePgVfHDplZ7U33NwHZkrVELsZP5fYj9pM5WBZB2GE= github.com/containers/libtrust v0.0.0-20190913040956-14b96171aa3b/go.mod h1:9rfv8iPl1ZP7aqh9YA68wnZv2NUDbXdcdPHVz0pFbPY= github.com/containers/ocicrypt v1.0.3 h1:vYgl+RZ9Q3DPMuTfxmN+qp0X2Bj52uuY2vnt6GzVe1c= @@ -340,6 +342,8 @@ github.com/klauspost/compress v1.11.1/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYs github.com/klauspost/compress v1.11.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= github.com/klauspost/compress v1.11.5 h1:xNCE0uE6yvTPRS+0wGNMHPo3NIpwnk6aluQZ6R6kRcc= github.com/klauspost/compress v1.11.5/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= 
+github.com/klauspost/compress v1.11.7 h1:0hzRabrMN4tSTvMfnL3SCv1ZGeAP23ynzodBgaHeMeg= +github.com/klauspost/compress v1.11.7/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= github.com/klauspost/pgzip v1.2.5 h1:qnWYvvKqedOF2ulHpMG72XQol4ILEJ8k2wwRl/Km8oE= github.com/klauspost/pgzip v1.2.5/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= @@ -571,6 +575,8 @@ github.com/uber/jaeger-lib v2.2.0+incompatible h1:MxZXOiR2JuoANZ3J6DE/U0kSFv/eJ/ github.com/uber/jaeger-lib v2.2.0+incompatible/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U= github.com/ulikunitz/xz v0.5.8 h1:ERv8V6GKqVi23rgu5cj9pVfVzJbOqAY2Ntl88O6c2nQ= github.com/ulikunitz/xz v0.5.8/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= +github.com/ulikunitz/xz v0.5.9 h1:RsKRIA2MO8x56wkkcd3LbtcE/uMszhb6DpRf+3uwa3I= +github.com/ulikunitz/xz v0.5.9/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= github.com/urfave/cli v0.0.0-20171014202726-7bc6a0acffa5/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= @@ -579,6 +585,8 @@ github.com/vbatts/tar-split v0.11.1 h1:0Odu65rhcZ3JZaPHxl7tCI3V/C/Q9Zf82UFravl02 github.com/vbatts/tar-split v0.11.1/go.mod h1:LEuURwDEiWjRjwu46yU3KVGuUdVv/dcnpcEPSzR8z6g= github.com/vbauerster/mpb/v5 v5.3.0 h1:vgrEJjUzHaSZKDRRxul5Oh4C72Yy/5VEMb0em+9M0mQ= github.com/vbauerster/mpb/v5 v5.3.0/go.mod h1:4yTkvAb8Cm4eylAp6t0JRq6pXDkFJ4krUlDqWYkakAs= +github.com/vbauerster/mpb/v5 v5.4.0 h1:n8JPunifvQvh6P1D1HAl2Ur9YcmKT1tpoUuiea5mlmg= +github.com/vbauerster/mpb/v5 v5.4.0/go.mod h1:fi4wVo7BVQ22QcvFObm+VwliQXlV1eBT8JDaKXR4JGI= github.com/vishvananda/netlink v0.0.0-20181108222139-023a6dafdcdf/go.mod h1:+SR5DhBJrl6ZM7CoCKvpw5BKroDKQ+PJqOg65H/2ktk= github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE= github.com/vishvananda/netlink v1.1.1-0.20201029203352-d40f9887b852 h1:cPXZWzzG0NllBLdjWoD1nDfaqu98YMv+OneaKc8sPOA= @@ -744,6 +752,7 @@ golang.org/x/sys v0.0.0-20201117170446-d9b008d0a637/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3 h1:kzM6+9dur93BcC2kVlYl34cHU+TYZLanmpSJHVMmL64= golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201218084310-7d0127a74742/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4 h1:myAQVi0cGEoqQVR5POX+8RR2mrocKqNN1hmeMqhX27k= golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= diff --git a/libpod/kube.go b/libpod/kube.go index 753c58099..b5197293e 100644 --- a/libpod/kube.go +++ b/libpod/kube.go @@ -171,9 +171,10 @@ func (p *Pod) podWithContainers(containers []*Container, ports []v1.ContainerPor deDupPodVolumes := make(map[string]*v1.Volume) first := true podContainers := make([]v1.Container, 0, len(containers)) + dnsInfo := v1.PodDNSConfig{} for _, ctr := range containers { if !ctr.IsInfra() { - ctr, volumes, err := containerToV1Container(ctr) + ctr, volumes, _, err := containerToV1Container(ctr) if err != nil { return nil, 
err } @@ -196,6 +197,22 @@ func (p *Pod) podWithContainers(containers []*Container, ports []v1.ContainerPor vol := vol deDupPodVolumes[vol.Name] = &vol } + } else { + _, _, infraDNS, err := containerToV1Container(ctr) + if err != nil { + return nil, err + } + if infraDNS != nil { + if servers := infraDNS.Nameservers; len(servers) > 0 { + dnsInfo.Nameservers = servers + } + if searches := infraDNS.Searches; len(searches) > 0 { + dnsInfo.Searches = searches + } + if options := infraDNS.Options; len(options) > 0 { + dnsInfo.Options = options + } + } } } podVolumes := make([]v1.Volume, 0, len(deDupPodVolumes)) @@ -203,10 +220,10 @@ func (p *Pod) podWithContainers(containers []*Container, ports []v1.ContainerPor podVolumes = append(podVolumes, *vol) } - return addContainersAndVolumesToPodObject(podContainers, podVolumes, p.Name()), nil + return addContainersAndVolumesToPodObject(podContainers, podVolumes, p.Name(), &dnsInfo), nil } -func addContainersAndVolumesToPodObject(containers []v1.Container, volumes []v1.Volume, podName string) *v1.Pod { +func addContainersAndVolumesToPodObject(containers []v1.Container, volumes []v1.Volume, podName string, dnsOptions *v1.PodDNSConfig) *v1.Pod { tm := v12.TypeMeta{ Kind: "Pod", APIVersion: "v1", @@ -228,6 +245,9 @@ func addContainersAndVolumesToPodObject(containers []v1.Container, volumes []v1. Containers: containers, Volumes: volumes, } + if dnsOptions != nil { + ps.DNSConfig = dnsOptions + } p := v1.Pod{ TypeMeta: tm, ObjectMeta: om, @@ -241,32 +261,65 @@ func addContainersAndVolumesToPodObject(containers []v1.Container, volumes []v1. func simplePodWithV1Containers(ctrs []*Container) (*v1.Pod, error) { kubeCtrs := make([]v1.Container, 0, len(ctrs)) kubeVolumes := make([]v1.Volume, 0) + podDNS := v1.PodDNSConfig{} for _, ctr := range ctrs { - kubeCtr, kubeVols, err := containerToV1Container(ctr) + kubeCtr, kubeVols, ctrDNS, err := containerToV1Container(ctr) if err != nil { return nil, err } kubeCtrs = append(kubeCtrs, kubeCtr) kubeVolumes = append(kubeVolumes, kubeVols...) - } - return addContainersAndVolumesToPodObject(kubeCtrs, kubeVolumes, strings.ReplaceAll(ctrs[0].Name(), "_", "")), nil + // Combine DNS information in sum'd structure + if ctrDNS != nil { + // nameservers + if servers := ctrDNS.Nameservers; servers != nil { + if podDNS.Nameservers == nil { + podDNS.Nameservers = make([]string, 0) + } + for _, s := range servers { + if !util.StringInSlice(s, podDNS.Nameservers) { // only append if it does not exist + podDNS.Nameservers = append(podDNS.Nameservers, s) + } + } + } + // search domains + if domains := ctrDNS.Searches; domains != nil { + if podDNS.Searches == nil { + podDNS.Searches = make([]string, 0) + } + for _, d := range domains { + if !util.StringInSlice(d, podDNS.Searches) { // only append if it does not exist + podDNS.Searches = append(podDNS.Searches, d) + } + } + } + // dns options + if options := ctrDNS.Options; options != nil { + if podDNS.Options == nil { + podDNS.Options = make([]v1.PodDNSConfigOption, 0) + } + podDNS.Options = append(podDNS.Options, options...) + } + } // end if ctrDNS + } + return addContainersAndVolumesToPodObject(kubeCtrs, kubeVolumes, strings.ReplaceAll(ctrs[0].Name(), "_", ""), &podDNS), nil } // containerToV1Container converts information we know about a libpod container // to a V1.Container specification. 
-func containerToV1Container(c *Container) (v1.Container, []v1.Volume, error) { +func containerToV1Container(c *Container) (v1.Container, []v1.Volume, *v1.PodDNSConfig, error) { kubeContainer := v1.Container{} kubeVolumes := []v1.Volume{} kubeSec, err := generateKubeSecurityContext(c) if err != nil { - return kubeContainer, kubeVolumes, err + return kubeContainer, kubeVolumes, nil, err } if len(c.config.Spec.Linux.Devices) > 0 { // TODO Enable when we can support devices and their names kubeContainer.VolumeDevices = generateKubeVolumeDeviceFromLinuxDevice(c.Spec().Linux.Devices) - return kubeContainer, kubeVolumes, errors.Wrapf(define.ErrNotImplemented, "linux devices") + return kubeContainer, kubeVolumes, nil, errors.Wrapf(define.ErrNotImplemented, "linux devices") } if len(c.config.UserVolumes) > 0 { @@ -274,7 +327,7 @@ func containerToV1Container(c *Container) (v1.Container, []v1.Volume, error) { // Volume names need to be coordinated "globally" in the kube files. volumeMounts, volumes, err := libpodMountsToKubeVolumeMounts(c) if err != nil { - return kubeContainer, kubeVolumes, err + return kubeContainer, kubeVolumes, nil, err } kubeContainer.VolumeMounts = volumeMounts kubeVolumes = append(kubeVolumes, volumes...) @@ -282,16 +335,16 @@ func containerToV1Container(c *Container) (v1.Container, []v1.Volume, error) { envVariables, err := libpodEnvVarsToKubeEnvVars(c.config.Spec.Process.Env) if err != nil { - return kubeContainer, kubeVolumes, err + return kubeContainer, kubeVolumes, nil, err } portmappings, err := c.PortMappings() if err != nil { - return kubeContainer, kubeVolumes, err + return kubeContainer, kubeVolumes, nil, err } ports, err := ocicniPortMappingToContainerPort(portmappings) if err != nil { - return kubeContainer, kubeVolumes, err + return kubeContainer, kubeVolumes, nil, err } containerCommands := c.Command() @@ -355,7 +408,38 @@ func containerToV1Container(c *Container) (v1.Container, []v1.Volume, error) { } } - return kubeContainer, kubeVolumes, nil + // Obtain the DNS entries from the container + dns := v1.PodDNSConfig{} + + // DNS servers + if servers := c.config.DNSServer; len(servers) > 0 { + dnsServers := make([]string, 0) + for _, server := range servers { + dnsServers = append(dnsServers, server.String()) + } + dns.Nameservers = dnsServers + } + + // DNS search domains + if searches := c.config.DNSSearch; len(searches) > 0 { + dns.Searches = searches + } + + // DNS options + if options := c.config.DNSOption; len(options) > 0 { + dnsOptions := make([]v1.PodDNSConfigOption, 0) + for _, option := range options { + // the option can be "k:v" or just "k", no delimiter is required + opts := strings.SplitN(option, ":", 2) + dnsOpt := v1.PodDNSConfigOption{ + Name: opts[0], + Value: &opts[1], + } + dnsOptions = append(dnsOptions, dnsOpt) + } + dns.Options = dnsOptions + } + return kubeContainer, kubeVolumes, &dns, nil } // ocicniPortMappingToContainerPort takes an ocicni portmapping and converts diff --git a/libpod/networking_linux.go b/libpod/networking_linux.go index ef2f034ab..737dbf935 100644 --- a/libpod/networking_linux.go +++ b/libpod/networking_linux.go @@ -977,7 +977,7 @@ func (c *Container) getContainerNetworkInfo() (*define.InspectNetworkSettings, e if c.state.NetNS == nil { // We still want to make dummy configurations for each CNI net // the container joined. 
- if len(networks) > 0 && !isDefault { + if len(networks) > 0 { settings.Networks = make(map[string]*define.InspectAdditionalNetwork, len(networks)) for _, net := range networks { cniNet := new(define.InspectAdditionalNetwork) @@ -998,7 +998,7 @@ func (c *Container) getContainerNetworkInfo() (*define.InspectNetworkSettings, e } // If we have CNI networks - handle that here - if len(networks) > 0 && !isDefault { + if len(networks) > 0 { if len(networks) != len(c.state.NetworkStatus) { return nil, errors.Wrapf(define.ErrInternal, "network inspection mismatch: asked to join %d CNI network(s) %v, but have information on %d network(s)", len(networks), networks, len(c.state.NetworkStatus)) } @@ -1028,7 +1028,9 @@ func (c *Container) getContainerNetworkInfo() (*define.InspectNetworkSettings, e settings.Networks[name] = addedNet } - return settings, nil + if !isDefault { + return settings, nil + } } // If not joining networks, we should have at most 1 result diff --git a/libpod/oci_conmon_exec_linux.go b/libpod/oci_conmon_exec_linux.go index dc5dd03df..faf86ea5b 100644 --- a/libpod/oci_conmon_exec_linux.go +++ b/libpod/oci_conmon_exec_linux.go @@ -126,20 +126,25 @@ func (r *ConmonOCIRuntime) ExecContainerHTTP(ctr *Container, sessionID string, o }() attachChan := make(chan error) + conmonPipeDataChan := make(chan conmonPipeData) go func() { // attachToExec is responsible for closing pipes - attachChan <- attachExecHTTP(ctr, sessionID, req, w, streams, pipes, detachKeys, options.Terminal, cancel, hijackDone, holdConnOpen) + attachChan <- attachExecHTTP(ctr, sessionID, req, w, streams, pipes, detachKeys, options.Terminal, cancel, hijackDone, holdConnOpen, execCmd, conmonPipeDataChan, ociLog) close(attachChan) }() - // Wait for conmon to succeed, when return. - if err := execCmd.Wait(); err != nil { - return -1, nil, errors.Wrapf(err, "cannot run conmon") - } + // NOTE: the channel is needed to communicate conmon's data. In case + // of an error, the error will be written on the hijacked http + // connection such that remote clients will receive the error. + pipeData := <-conmonPipeDataChan - pid, err := readConmonPipeData(pipes.syncPipe, ociLog) + return pipeData.pid, attachChan, pipeData.err +} - return pid, attachChan, err +// conmonPipeData contains the data when reading from conmon's pipe. +type conmonPipeData struct { + pid int + err error } // ExecContainerDetached executes a command in a running container, but does @@ -488,9 +493,16 @@ func (r *ConmonOCIRuntime) startExec(c *Container, sessionID string, options *Ex } // Attach to a container over HTTP -func attachExecHTTP(c *Container, sessionID string, r *http.Request, w http.ResponseWriter, streams *HTTPAttachStreams, pipes *execPipes, detachKeys []byte, isTerminal bool, cancel <-chan bool, hijackDone chan<- bool, holdConnOpen <-chan bool) (deferredErr error) { +func attachExecHTTP(c *Container, sessionID string, r *http.Request, w http.ResponseWriter, streams *HTTPAttachStreams, pipes *execPipes, detachKeys []byte, isTerminal bool, cancel <-chan bool, hijackDone chan<- bool, holdConnOpen <-chan bool, execCmd *exec.Cmd, conmonPipeDataChan chan<- conmonPipeData, ociLog string) (deferredErr error) { + // NOTE: As you may notice, the attach code is quite complex. + // Many things happen concurrently and yet are interdependent. + // If you ever change this function, make sure to write to the + // conmonPipeDataChan in case of an error. 
+ if pipes == nil || pipes.startPipe == nil || pipes.attachPipe == nil { - return errors.Wrapf(define.ErrInvalidArg, "must provide a start and attach pipe to finish an exec attach") + err := errors.Wrapf(define.ErrInvalidArg, "must provide a start and attach pipe to finish an exec attach") + conmonPipeDataChan <- conmonPipeData{-1, err} + return err } defer func() { @@ -509,17 +521,20 @@ func attachExecHTTP(c *Container, sessionID string, r *http.Request, w http.Resp // set up the socket path, such that it is the correct length and location for exec sockPath, err := c.execAttachSocketPath(sessionID) if err != nil { + conmonPipeDataChan <- conmonPipeData{-1, err} return err } // 2: read from attachFd that the parent process has set up the console socket if _, err := readConmonPipeData(pipes.attachPipe, ""); err != nil { + conmonPipeDataChan <- conmonPipeData{-1, err} return err } // 2: then attach conn, err := openUnixSocket(sockPath) if err != nil { + conmonPipeDataChan <- conmonPipeData{-1, err} return errors.Wrapf(err, "failed to connect to container's attach socket: %v", sockPath) } defer func() { @@ -540,11 +555,13 @@ func attachExecHTTP(c *Container, sessionID string, r *http.Request, w http.Resp // Perform hijack hijacker, ok := w.(http.Hijacker) if !ok { + conmonPipeDataChan <- conmonPipeData{-1, err} return errors.Errorf("unable to hijack connection") } httpCon, httpBuf, err := hijacker.Hijack() if err != nil { + conmonPipeDataChan <- conmonPipeData{-1, err} return errors.Wrapf(err, "error hijacking connection") } @@ -555,10 +572,23 @@ func attachExecHTTP(c *Container, sessionID string, r *http.Request, w http.Resp // Force a flush after the header is written. if err := httpBuf.Flush(); err != nil { + conmonPipeDataChan <- conmonPipeData{-1, err} return errors.Wrapf(err, "error flushing HTTP hijack header") } go func() { + // Wait for conmon to succeed, when return. + if err := execCmd.Wait(); err != nil { + conmonPipeDataChan <- conmonPipeData{-1, err} + } else { + pid, err := readConmonPipeData(pipes.syncPipe, ociLog) + if err != nil { + hijackWriteError(err, c.ID(), isTerminal, httpBuf) + conmonPipeDataChan <- conmonPipeData{pid, err} + } else { + conmonPipeDataChan <- conmonPipeData{pid, err} + } + } // We need to hold the connection open until the complete exec // function has finished. This channel will be closed in a defer // in that function, so we can wait for it here. diff --git a/libpod/rootless_cni_linux.go b/libpod/rootless_cni_linux.go index 9a980750f..94ae062aa 100644 --- a/libpod/rootless_cni_linux.go +++ b/libpod/rootless_cni_linux.go @@ -25,7 +25,7 @@ import ( // Built from ../contrib/rootless-cni-infra. var rootlessCNIInfraImage = map[string]string{ - "amd64": "quay.io/libpod/rootless-cni-infra@sha256:304742d5d221211df4ec672807a5842ff11e3729c50bc424ea0cea858f69d7b7", // 3-amd64 + "amd64": "quay.io/libpod/rootless-cni-infra@sha256:adf352454666f7ce9ca3e1098448b5ee18f89c4516471ec99447ec9ece917f36", // 5-amd64 } const ( @@ -58,9 +58,33 @@ func AllocRootlessCNI(ctx context.Context, c *Container) (ns.NetNS, []*cnitypes. 
return nil, nil, err } k8sPodName := getCNIPodName(c) // passed to CNI as K8S_POD_NAME + ip := "" + if c.config.StaticIP != nil { + ip = c.config.StaticIP.String() + } + mac := "" + if c.config.StaticMAC != nil { + mac = c.config.StaticMAC.String() + } + aliases, err := c.runtime.state.GetAllNetworkAliases(c) + if err != nil { + return nil, nil, err + } + capArgs := "" + // add network aliases json encoded as capabilityArgs for cni + if len(aliases) > 0 { + capabilityArgs := make(map[string]interface{}) + capabilityArgs["aliases"] = aliases + b, err := json.Marshal(capabilityArgs) + if err != nil { + return nil, nil, err + } + capArgs = string(b) + } + cniResults := make([]*cnitypes.Result, len(networks)) for i, nw := range networks { - cniRes, err := rootlessCNIInfraCallAlloc(infra, c.ID(), nw, k8sPodName) + cniRes, err := rootlessCNIInfraCallAlloc(infra, c.ID(), nw, k8sPodName, ip, mac, capArgs) if err != nil { return nil, nil, err } @@ -137,11 +161,11 @@ func getCNIPodName(c *Container) string { return c.Name() } -func rootlessCNIInfraCallAlloc(infra *Container, id, nw, k8sPodName string) (*cnitypes.Result, error) { - logrus.Debugf("rootless CNI: alloc %q, %q, %q", id, nw, k8sPodName) +func rootlessCNIInfraCallAlloc(infra *Container, id, nw, k8sPodName, ip, mac, capArgs string) (*cnitypes.Result, error) { + logrus.Debugf("rootless CNI: alloc %q, %q, %q, %q, %q, %q", id, nw, k8sPodName, ip, mac, capArgs) var err error - _, err = rootlessCNIInfraExec(infra, "alloc", id, nw, k8sPodName) + _, err = rootlessCNIInfraExec(infra, "alloc", id, nw, k8sPodName, ip, mac, capArgs) if err != nil { return nil, err } diff --git a/libpod/util.go b/libpod/util.go index bf9bf2542..391208fb9 100644 --- a/libpod/util.go +++ b/libpod/util.go @@ -235,20 +235,16 @@ func checkDependencyContainer(depCtr, ctr *Container) error { return nil } -// hijackWriteErrorAndClose writes an error to a hijacked HTTP session and -// closes it. Intended to HTTPAttach function. -// If error is nil, it will not be written; we'll only close the connection. -func hijackWriteErrorAndClose(toWrite error, cid string, terminal bool, httpCon io.Closer, httpBuf *bufio.ReadWriter) { +// hijackWriteError writes an error to a hijacked HTTP session. +func hijackWriteError(toWrite error, cid string, terminal bool, httpBuf *bufio.ReadWriter) { if toWrite != nil { - errString := []byte(fmt.Sprintf("%v\n", toWrite)) + errString := []byte(fmt.Sprintf("Error: %v\n", toWrite)) if !terminal { // We need a header. header := makeHTTPAttachHeader(2, uint32(len(errString))) if _, err := httpBuf.Write(header); err != nil { logrus.Errorf("Error writing header for container %s attach connection error: %v", cid, err) } - // TODO: May want to return immediately here to avoid - // writing garbage to the socket? } if _, err := httpBuf.Write(errString); err != nil { logrus.Errorf("Error writing error to container %s HTTP attach connection: %v", cid, err) @@ -257,6 +253,13 @@ func hijackWriteErrorAndClose(toWrite error, cid string, terminal bool, httpCon logrus.Errorf("Error flushing HTTP buffer for container %s HTTP attach connection: %v", cid, err) } } +} + +// hijackWriteErrorAndClose writes an error to a hijacked HTTP session and +// closes it. Intended for the HTTPAttach function. +// If error is nil, it will not be written; we'll only close the connection.
+func hijackWriteErrorAndClose(toWrite error, cid string, terminal bool, httpCon io.Closer, httpBuf *bufio.ReadWriter) { + hijackWriteError(toWrite, cid, terminal, httpBuf) if err := httpCon.Close(); err != nil { logrus.Errorf("Error closing container %s HTTP attach connection: %v", cid, err) diff --git a/nix/default.nix b/nix/default.nix index 13b4585ea..7745d8b50 100644 --- a/nix/default.nix +++ b/nix/default.nix @@ -49,9 +49,11 @@ let buildPhase = '' patchShebangs . make bin/podman + make bin/podman-remote ''; installPhase = '' install -Dm755 bin/podman $out/bin/podman + install -Dm755 bin/podman-remote $out/bin/podman-remote ''; }; in self diff --git a/nix/nixpkgs.json b/nix/nixpkgs.json index d304de536..0cfb251f2 100644 --- a/nix/nixpkgs.json +++ b/nix/nixpkgs.json @@ -1,9 +1,9 @@ { "url": "https://github.com/nixos/nixpkgs", - "rev": "4a75203f0270f96cbc87f5dfa5d5185690237d87", - "date": "2020-12-29T03:18:48+01:00", - "path": "/nix/store/scswsm6r4jnhp9ki0f6s81kpj5x6jkn7-nixpkgs", - "sha256": "0h70fm9aa7s06wkalbadw70z5rscbs3p6nblb47z523nhlzgjxk9", + "rev": "ce7b327a52d1b82f82ae061754545b1c54b06c66", + "date": "2021-01-25T11:28:05+01:00", + "path": "/nix/store/dpsa6a1sy8hwhwjkklc52brs9z1k5fx9-nixpkgs", + "sha256": "1rc4if8nmy9lrig0ddihdwpzg2s8y36vf20hfywb8hph5hpsg4vj", "fetchSubmodules": false, "deepClone": false, "leaveDotGit": false diff --git a/pkg/api/handlers/compat/containers.go b/pkg/api/handlers/compat/containers.go index aa12afc82..0f91a4362 100644 --- a/pkg/api/handlers/compat/containers.go +++ b/pkg/api/handlers/compat/containers.go @@ -73,7 +73,7 @@ func RemoveContainer(w http.ResponseWriter, r *http.Request) { utils.InternalServerError(w, err) return } - if report[0].Err != nil { + if len(report) > 0 && report[0].Err != nil { utils.InternalServerError(w, report[0].Err) return } diff --git a/pkg/api/handlers/compat/images_push.go b/pkg/api/handlers/compat/images_push.go index 0f3da53e8..c352ac6cd 100644 --- a/pkg/api/handlers/compat/images_push.go +++ b/pkg/api/handlers/compat/images_push.go @@ -3,13 +3,15 @@ package compat import ( "context" "net/http" - "os" "strings" + "github.com/containers/image/v5/types" "github.com/containers/podman/v2/libpod" - "github.com/containers/podman/v2/libpod/image" "github.com/containers/podman/v2/pkg/api/handlers/utils" "github.com/containers/podman/v2/pkg/auth" + "github.com/containers/podman/v2/pkg/domain/entities" + "github.com/containers/podman/v2/pkg/domain/infra/abi" + "github.com/containers/storage" "github.com/gorilla/schema" "github.com/pkg/errors" ) @@ -18,11 +20,20 @@ import ( func PushImage(w http.ResponseWriter, r *http.Request) { decoder := r.Context().Value("decoder").(*schema.Decoder) runtime := r.Context().Value("runtime").(*libpod.Runtime) + // Now use the ABI implementation to prevent us from having duplicate + // code. 
+ imageEngine := abi.ImageEngine{Libpod: runtime} query := struct { - Tag string `schema:"tag"` + All bool `schema:"all"` + Compress bool `schema:"compress"` + Destination string `schema:"destination"` + Format string `schema:"format"` + TLSVerify bool `schema:"tlsVerify"` + Tag string `schema:"tag"` }{ // This is where you can override the golang default value for one of fields + TLSVerify: true, } if err := decoder.Decode(&query, r.URL.Query()); err != nil { @@ -43,39 +54,34 @@ func PushImage(w http.ResponseWriter, r *http.Request) { return } - newImage, err := runtime.ImageRuntime().NewFromLocal(imageName) - if err != nil { - utils.ImageNotFound(w, imageName, errors.Wrapf(err, "failed to find image %s", imageName)) - return - } - - authConf, authfile, key, err := auth.GetCredentials(r) + authconf, authfile, key, err := auth.GetCredentials(r) if err != nil { utils.Error(w, "Something went wrong.", http.StatusBadRequest, errors.Wrapf(err, "failed to parse %q header for %s", key, r.URL.String())) return } defer auth.RemoveAuthfile(authfile) - - dockerRegistryOptions := &image.DockerRegistryOptions{DockerRegistryCreds: authConf} - if sys := runtime.SystemContext(); sys != nil { - dockerRegistryOptions.DockerCertPath = sys.DockerCertPath - dockerRegistryOptions.RegistriesConfPath = sys.SystemRegistriesConfPath + var username, password string + if authconf != nil { + username = authconf.Username + password = authconf.Password + } + options := entities.ImagePushOptions{ + All: query.All, + Authfile: authfile, + Compress: query.Compress, + Format: query.Format, + Password: password, + Username: username, } + if _, found := r.URL.Query()["tlsVerify"]; found { + options.SkipTLSVerify = types.NewOptionalBool(!query.TLSVerify) + } + if err := imageEngine.Push(context.Background(), imageName, query.Destination, options); err != nil { + if errors.Cause(err) != storage.ErrImageUnknown { + utils.ImageNotFound(w, imageName, errors.Wrapf(err, "failed to find image %s", imageName)) + return + } - err = newImage.PushImageToHeuristicDestination( - context.Background(), - imageName, - "", // manifest type - authfile, - "", // digest file - "", // signature policy - os.Stderr, - false, // force compression - image.SigningOptions{}, - dockerRegistryOptions, - nil, // additional tags - ) - if err != nil { utils.Error(w, "Something went wrong.", http.StatusBadRequest, errors.Wrapf(err, "error pushing image %q", imageName)) return } diff --git a/pkg/api/handlers/libpod/containers.go b/pkg/api/handlers/libpod/containers.go index 6b07b1cc5..a0cb1d49e 100644 --- a/pkg/api/handlers/libpod/containers.go +++ b/pkg/api/handlers/libpod/containers.go @@ -12,7 +12,6 @@ import ( "github.com/containers/podman/v2/pkg/api/handlers/utils" "github.com/containers/podman/v2/pkg/domain/entities" "github.com/containers/podman/v2/pkg/domain/infra/abi" - "github.com/containers/podman/v2/pkg/ps" "github.com/gorilla/schema" "github.com/pkg/errors" "github.com/sirupsen/logrus" @@ -63,6 +62,7 @@ func ListContainers(w http.ResponseWriter, r *http.Request) { decoder := r.Context().Value("decoder").(*schema.Decoder) query := struct { All bool `schema:"all"` + External bool `schema:"external"` Filters map[string][]string `schema:"filters"` Last int `schema:"last"` // alias for limit Limit int `schema:"limit"` @@ -90,17 +90,22 @@ func ListContainers(w http.ResponseWriter, r *http.Request) { } runtime := r.Context().Value("runtime").(*libpod.Runtime) + // Now use the ABI implementation to prevent us from having duplicate + // code. 
+ containerEngine := abi.ContainerEngine{Libpod: runtime} opts := entities.ContainerListOptions{ All: query.All, + External: query.External, Filters: query.Filters, Last: limit, - Size: query.Size, - Sort: "", Namespace: query.Namespace, - Pod: true, - Sync: query.Sync, + // Always return Pod, should not be part of the API. + // https://github.com/containers/podman/pull/7223 + Pod: true, + Size: query.Size, + Sync: query.Sync, } - pss, err := ps.GetContainerLists(runtime, opts) + pss, err := containerEngine.ContainerList(r.Context(), opts) if err != nil { utils.InternalServerError(w, err) return diff --git a/pkg/api/handlers/libpod/manifests.go b/pkg/api/handlers/libpod/manifests.go index 35221ecf1..ded51a31f 100644 --- a/pkg/api/handlers/libpod/manifests.go +++ b/pkg/api/handlers/libpod/manifests.go @@ -147,7 +147,6 @@ func ManifestPush(w http.ResponseWriter, r *http.Request) { query := struct { All bool `schema:"all"` Destination string `schema:"destination"` - Format string `schema:"format"` TLSVerify bool `schema:"tlsVerify"` }{ // Add defaults here once needed. @@ -163,24 +162,21 @@ func ManifestPush(w http.ResponseWriter, r *http.Request) { } source := utils.GetName(r) - authConf, authfile, key, err := auth.GetCredentials(r) + authconf, authfile, key, err := auth.GetCredentials(r) if err != nil { utils.Error(w, "failed to retrieve repository credentials", http.StatusBadRequest, errors.Wrapf(err, "failed to parse %q header for %s", key, r.URL.String())) return } defer auth.RemoveAuthfile(authfile) var username, password string - if authConf != nil { - username = authConf.Username - password = authConf.Password - + if authconf != nil { + username = authconf.Username + password = authconf.Password } - options := entities.ImagePushOptions{ Authfile: authfile, Username: username, Password: password, - Format: query.Format, All: query.All, } if sys := runtime.SystemContext(); sys != nil { diff --git a/pkg/api/server/register_containers.go b/pkg/api/server/register_containers.go index 74a04b2e6..e30747800 100644 --- a/pkg/api/server/register_containers.go +++ b/pkg/api/server/register_containers.go @@ -48,6 +48,11 @@ func (s *APIServer) registerContainersHandlers(r *mux.Router) error { // default: false // description: Return all containers. By default, only running containers are shown // - in: query + // name: external + // type: boolean + // default: false + // description: Return containers in storage not controlled by Podman + // - in: query // name: limit // description: Return this number of most recently created containers, including non-running ones. // type: integer diff --git a/pkg/api/server/register_images.go b/pkg/api/server/register_images.go index d76f811e9..2ce0829b4 100644 --- a/pkg/api/server/register_images.go +++ b/pkg/api/server/register_images.go @@ -235,6 +235,18 @@ func (s *APIServer) registerImagesHandlers(r *mux.Router) error { // name: tag // type: string // description: The tag to associate with the image on the registry. 
+ // - in: query + // name: all + // type: boolean + // description: All indicates whether to push all images related to the image list + // - in: query + // name: compress + // type: boolean + // description: use compression on image + // - in: query + // name: destination + // type: string + // description: destination name for the image being pushed // - in: header // name: X-Registry-Auth // type: string diff --git a/pkg/bindings/containers/types.go b/pkg/bindings/containers/types.go index 24604fa83..3fb1ab733 100644 --- a/pkg/bindings/containers/types.go +++ b/pkg/bindings/containers/types.go @@ -106,6 +106,7 @@ type MountedContainerPathsOptions struct{} // ListOptions are optional options for listing containers type ListOptions struct { All *bool + External *bool Filters map[string][]string Last *int Namespace *bool diff --git a/pkg/bindings/containers/types_list_options.go b/pkg/bindings/containers/types_list_options.go index 43326fa59..c363dcd32 100644 --- a/pkg/bindings/containers/types_list_options.go +++ b/pkg/bindings/containers/types_list_options.go @@ -103,6 +103,22 @@ func (o *ListOptions) GetAll() bool { return *o.All } +// WithExternal +func (o *ListOptions) WithExternal(value bool) *ListOptions { + v := &value + o.External = v + return o +} + +// GetExternal +func (o *ListOptions) GetExternal() bool { + var external bool + if o.External == nil { + return external + } + return *o.External +} + // WithFilters func (o *ListOptions) WithFilters(value map[string][]string) *ListOptions { v := value diff --git a/pkg/bindings/images/types.go b/pkg/bindings/images/types.go index 0248f2fa6..7bf70c82b 100644 --- a/pkg/bindings/images/types.go +++ b/pkg/bindings/images/types.go @@ -2,7 +2,6 @@ package images import ( "github.com/containers/buildah/imagebuildah" - "github.com/containers/common/pkg/config" ) //go:generate go run ../generator/generator.go RemoveOptions @@ -104,37 +103,16 @@ type PushOptions struct { // Authfile is the path to the authentication file. Ignored for remote // calls. Authfile *string - // CertDir is the path to certificate directories. Ignored for remote - // calls. - CertDir *string - // Compress tarball image layers when pushing to a directory using the 'dir' - // transport. Default is same compression type as source. Ignored for remote - // calls. + // Compress tarball image layers when pushing to a directory using the 'dir' transport. Compress *bool - // Username for authenticating against the registry. - Username *string + // Manifest type of the pushed image + Format *string // Password for authenticating against the registry. Password *string - // DigestFile, after copying the image, write the digest of the resulting - // image to the file. Ignored for remote calls. - DigestFile *string - // Format is the Manifest type (oci, v2s1, or v2s2) to use when pushing an - // image using the 'dir' transport. Default is manifest type of source. - // Ignored for remote calls. - Format *string - // Quiet can be specified to suppress pull progress when pulling. Ignored - // for remote calls. - Quiet *bool - // RemoveSignatures, discard any pre-existing signatures in the image. - // Ignored for remote calls. - RemoveSignatures *bool - // SignaturePolicy to use when pulling. Ignored for remote calls. - SignaturePolicy *string - // SignBy adds a signature at the destination using the specified key. - // Ignored for remote calls. - SignBy *string // SkipTLSVerify to skip HTTPS and certificate verification. 
SkipTLSVerify *bool + // Username for authenticating against the registry. + Username *string } //go:generate go run ../generator/generator.go SearchOptions @@ -161,32 +139,25 @@ type PullOptions struct { // AllTags can be specified to pull all tags of an image. Note // that this only works if the image does not include a tag. AllTags *bool + // Arch will overwrite the local architecture for image pulls. + Arch *string // Authfile is the path to the authentication file. Ignored for remote // calls. Authfile *string - // CertDir is the path to certificate directories. Ignored for remote - // calls. - CertDir *string - // Username for authenticating against the registry. - Username *string - // Password for authenticating against the registry. - Password *string - // Arch will overwrite the local architecture for image pulls. - Arch *string // OS will overwrite the local operating system (OS) for image // pulls. OS *string - // Variant will overwrite the local variant for image pulls. - Variant *string + // Password for authenticating against the registry. + Password *string // Quiet can be specified to suppress pull progress when pulling. Ignored // for remote calls. Quiet *bool - // SignaturePolicy to use when pulling. Ignored for remote calls. - SignaturePolicy *string // SkipTLSVerify to skip HTTPS and certificate verification. SkipTLSVerify *bool - // PullPolicy whether to pull new image - PullPolicy *config.PullPolicy + // Username for authenticating against the registry. + Username *string + // Variant will overwrite the local variant for image pulls. + Variant *string } //BuildOptions are optional options for building images diff --git a/pkg/bindings/images/types_pull_options.go b/pkg/bindings/images/types_pull_options.go index 2bdf2b66e..5452560fb 100644 --- a/pkg/bindings/images/types_pull_options.go +++ b/pkg/bindings/images/types_pull_options.go @@ -6,7 +6,6 @@ import ( "strconv" "strings" - "github.com/containers/common/pkg/config" jsoniter "github.com/json-iterator/go" "github.com/pkg/errors" ) @@ -104,70 +103,6 @@ func (o *PullOptions) GetAllTags() bool { return *o.AllTags } -// WithAuthfile -func (o *PullOptions) WithAuthfile(value string) *PullOptions { - v := &value - o.Authfile = v - return o -} - -// GetAuthfile -func (o *PullOptions) GetAuthfile() string { - var authfile string - if o.Authfile == nil { - return authfile - } - return *o.Authfile -} - -// WithCertDir -func (o *PullOptions) WithCertDir(value string) *PullOptions { - v := &value - o.CertDir = v - return o -} - -// GetCertDir -func (o *PullOptions) GetCertDir() string { - var certDir string - if o.CertDir == nil { - return certDir - } - return *o.CertDir -} - -// WithUsername -func (o *PullOptions) WithUsername(value string) *PullOptions { - v := &value - o.Username = v - return o -} - -// GetUsername -func (o *PullOptions) GetUsername() string { - var username string - if o.Username == nil { - return username - } - return *o.Username -} - -// WithPassword -func (o *PullOptions) WithPassword(value string) *PullOptions { - v := &value - o.Password = v - return o -} - -// GetPassword -func (o *PullOptions) GetPassword() string { - var password string - if o.Password == nil { - return password - } - return *o.Password -} - // WithArch func (o *PullOptions) WithArch(value string) *PullOptions { v := &value @@ -184,6 +119,22 @@ func (o *PullOptions) GetArch() string { return *o.Arch } +// WithAuthfile +func (o *PullOptions) WithAuthfile(value string) *PullOptions { + v := &value + o.Authfile = v + return o +} + 
+// GetAuthfile +func (o *PullOptions) GetAuthfile() string { + var authfile string + if o.Authfile == nil { + return authfile + } + return *o.Authfile +} + // WithOS func (o *PullOptions) WithOS(value string) *PullOptions { v := &value @@ -200,20 +151,20 @@ func (o *PullOptions) GetOS() string { return *o.OS } -// WithVariant -func (o *PullOptions) WithVariant(value string) *PullOptions { +// WithPassword +func (o *PullOptions) WithPassword(value string) *PullOptions { v := &value - o.Variant = v + o.Password = v return o } -// GetVariant -func (o *PullOptions) GetVariant() string { - var variant string - if o.Variant == nil { - return variant +// GetPassword +func (o *PullOptions) GetPassword() string { + var password string + if o.Password == nil { + return password } - return *o.Variant + return *o.Password } // WithQuiet @@ -232,22 +183,6 @@ func (o *PullOptions) GetQuiet() bool { return *o.Quiet } -// WithSignaturePolicy -func (o *PullOptions) WithSignaturePolicy(value string) *PullOptions { - v := &value - o.SignaturePolicy = v - return o -} - -// GetSignaturePolicy -func (o *PullOptions) GetSignaturePolicy() string { - var signaturePolicy string - if o.SignaturePolicy == nil { - return signaturePolicy - } - return *o.SignaturePolicy -} - // WithSkipTLSVerify func (o *PullOptions) WithSkipTLSVerify(value bool) *PullOptions { v := &value @@ -264,18 +199,34 @@ func (o *PullOptions) GetSkipTLSVerify() bool { return *o.SkipTLSVerify } -// WithPullPolicy -func (o *PullOptions) WithPullPolicy(value config.PullPolicy) *PullOptions { +// WithUsername +func (o *PullOptions) WithUsername(value string) *PullOptions { + v := &value + o.Username = v + return o +} + +// GetUsername +func (o *PullOptions) GetUsername() string { + var username string + if o.Username == nil { + return username + } + return *o.Username +} + +// WithVariant +func (o *PullOptions) WithVariant(value string) *PullOptions { v := &value - o.PullPolicy = v + o.Variant = v return o } -// GetPullPolicy -func (o *PullOptions) GetPullPolicy() config.PullPolicy { - var pullPolicy config.PullPolicy - if o.PullPolicy == nil { - return pullPolicy +// GetVariant +func (o *PullOptions) GetVariant() string { + var variant string + if o.Variant == nil { + return variant } - return *o.PullPolicy + return *o.Variant } diff --git a/pkg/bindings/images/types_push_options.go b/pkg/bindings/images/types_push_options.go index 0c12ce4ac..b7d8a6f2d 100644 --- a/pkg/bindings/images/types_push_options.go +++ b/pkg/bindings/images/types_push_options.go @@ -119,22 +119,6 @@ func (o *PushOptions) GetAuthfile() string { return *o.Authfile } -// WithCertDir -func (o *PushOptions) WithCertDir(value string) *PushOptions { - v := &value - o.CertDir = v - return o -} - -// GetCertDir -func (o *PushOptions) GetCertDir() string { - var certDir string - if o.CertDir == nil { - return certDir - } - return *o.CertDir -} - // WithCompress func (o *PushOptions) WithCompress(value bool) *PushOptions { v := &value @@ -151,54 +135,6 @@ func (o *PushOptions) GetCompress() bool { return *o.Compress } -// WithUsername -func (o *PushOptions) WithUsername(value string) *PushOptions { - v := &value - o.Username = v - return o -} - -// GetUsername -func (o *PushOptions) GetUsername() string { - var username string - if o.Username == nil { - return username - } - return *o.Username -} - -// WithPassword -func (o *PushOptions) WithPassword(value string) *PushOptions { - v := &value - o.Password = v - return o -} - -// GetPassword -func (o *PushOptions) GetPassword() string 
{ - var password string - if o.Password == nil { - return password - } - return *o.Password -} - -// WithDigestFile -func (o *PushOptions) WithDigestFile(value string) *PushOptions { - v := &value - o.DigestFile = v - return o -} - -// GetDigestFile -func (o *PushOptions) GetDigestFile() string { - var digestFile string - if o.DigestFile == nil { - return digestFile - } - return *o.DigestFile -} - // WithFormat func (o *PushOptions) WithFormat(value string) *PushOptions { v := &value @@ -215,68 +151,20 @@ func (o *PushOptions) GetFormat() string { return *o.Format } -// WithQuiet -func (o *PushOptions) WithQuiet(value bool) *PushOptions { - v := &value - o.Quiet = v - return o -} - -// GetQuiet -func (o *PushOptions) GetQuiet() bool { - var quiet bool - if o.Quiet == nil { - return quiet - } - return *o.Quiet -} - -// WithRemoveSignatures -func (o *PushOptions) WithRemoveSignatures(value bool) *PushOptions { - v := &value - o.RemoveSignatures = v - return o -} - -// GetRemoveSignatures -func (o *PushOptions) GetRemoveSignatures() bool { - var removeSignatures bool - if o.RemoveSignatures == nil { - return removeSignatures - } - return *o.RemoveSignatures -} - -// WithSignaturePolicy -func (o *PushOptions) WithSignaturePolicy(value string) *PushOptions { - v := &value - o.SignaturePolicy = v - return o -} - -// GetSignaturePolicy -func (o *PushOptions) GetSignaturePolicy() string { - var signaturePolicy string - if o.SignaturePolicy == nil { - return signaturePolicy - } - return *o.SignaturePolicy -} - -// WithSignBy -func (o *PushOptions) WithSignBy(value string) *PushOptions { +// WithPassword +func (o *PushOptions) WithPassword(value string) *PushOptions { v := &value - o.SignBy = v + o.Password = v return o } -// GetSignBy -func (o *PushOptions) GetSignBy() string { - var signBy string - if o.SignBy == nil { - return signBy +// GetPassword +func (o *PushOptions) GetPassword() string { + var password string + if o.Password == nil { + return password } - return *o.SignBy + return *o.Password } // WithSkipTLSVerify @@ -294,3 +182,19 @@ func (o *PushOptions) GetSkipTLSVerify() bool { } return *o.SkipTLSVerify } + +// WithUsername +func (o *PushOptions) WithUsername(value string) *PushOptions { + v := &value + o.Username = v + return o +} + +// GetUsername +func (o *PushOptions) GetUsername() string { + var username string + if o.Username == nil { + return username + } + return *o.Username +} diff --git a/pkg/bindings/manifests/manifests.go b/pkg/bindings/manifests/manifests.go index fec9832a0..4634dd442 100644 --- a/pkg/bindings/manifests/manifests.go +++ b/pkg/bindings/manifests/manifests.go @@ -153,7 +153,6 @@ func Push(ctx context.Context, name, destination string, options *images.PushOpt } params.Set("image", name) params.Set("destination", destination) - params.Set("format", *options.Format) _, err = conn.DoRequest(nil, http.MethodPost, "/manifests/%s/push", params, nil, name) if err != nil { return "", err diff --git a/pkg/cgroups/cgroups.go b/pkg/cgroups/cgroups.go index c200dd01a..285fd093a 100644 --- a/pkg/cgroups/cgroups.go +++ b/pkg/cgroups/cgroups.go @@ -24,6 +24,7 @@ var ( ErrCgroupDeleted = errors.New("cgroup deleted") // ErrCgroupV1Rootless means the cgroup v1 were attempted to be used in rootless environment ErrCgroupV1Rootless = errors.New("no support for CGroups V1 in rootless environments") + ErrStatCgroup = errors.New("no cgroup available for gathering user statistics") ) // CgroupControl controls a cgroup hierarchy @@ -525,10 +526,19 @@ func (c *CgroupControl) 
AddPid(pid int) error {
 // Stat returns usage statistics for the cgroup
 func (c *CgroupControl) Stat() (*Metrics, error) {
 	m := Metrics{}
+	found := false
 	for _, h := range handlers {
 		if err := h.Stat(c, &m); err != nil {
-			return nil, err
+			if !os.IsNotExist(errors.Cause(err)) {
+				return nil, err
+			}
+			logrus.Warningf("Failed to retrieve cgroup stats: %v", err)
+			continue
 		}
+		found = true
+	}
+	if !found {
+		return nil, ErrStatCgroup
 	}
 	return &m, nil
 }
diff --git a/pkg/cgroups/cgroups_test.go b/pkg/cgroups/cgroups_test.go
new file mode 100644
index 000000000..54315f7be
--- /dev/null
+++ b/pkg/cgroups/cgroups_test.go
@@ -0,0 +1,32 @@
+package cgroups
+
+import (
+	"testing"
+
+	"github.com/containers/podman/v2/pkg/rootless"
+	spec "github.com/opencontainers/runtime-spec/specs-go"
+)
+
+func TestCreated(t *testing.T) {
+	// this test only works as root; skip it in rootless mode
+	if rootless.IsRootless() {
+		return
+	}
+
+	var resources spec.LinuxResources
+	cgr, err := New("machine.slice", &resources)
+	if err != nil {
+		t.Error(err)
+	}
+	if err := cgr.Delete(); err != nil {
+		t.Error(err)
+	}
+
+	cgr, err = NewSystemd("machine.slice")
+	if err != nil {
+		t.Error(err)
+	}
+	if err := cgr.Delete(); err != nil {
+		t.Error(err)
+	}
+}
diff --git a/pkg/domain/entities/containers.go b/pkg/domain/entities/containers.go
index 4c1bd6a7d..2c32f792f 100644
--- a/pkg/domain/entities/containers.go
+++ b/pkg/domain/entities/containers.go
@@ -297,8 +297,8 @@ type ContainerListOptions struct {
 	Pod      bool
 	Quiet    bool
 	Size     bool
+	External bool
 	Sort     string
-	Storage  bool
 	Sync     bool
 	Watch    uint
 }
diff --git a/pkg/domain/infra/tunnel/containers.go b/pkg/domain/infra/tunnel/containers.go
index 524b29553..0c61714c3 100644
--- a/pkg/domain/infra/tunnel/containers.go
+++ b/pkg/domain/infra/tunnel/containers.go
@@ -173,19 +173,32 @@ func (ic *ContainerEngine) ContainerRestart(ctx context.Context, namesOrIds []st
 }
 
 func (ic *ContainerEngine) ContainerRm(ctx context.Context, namesOrIds []string, opts entities.RmOptions) ([]*entities.RmReport, error) {
-	ctrs, err := getContainersByContext(ic.ClientCtx, opts.All, opts.Ignore, namesOrIds)
-	if err != nil {
-		return nil, err
-	}
 	// TODO there is no endpoint for container eviction.
Need to discuss - reports := make([]*entities.RmReport, 0, len(ctrs)) - options := new(containers.RemoveOptions).WithForce(opts.Force).WithVolumes(opts.Volumes) - for _, c := range ctrs { + options := new(containers.RemoveOptions).WithForce(opts.Force).WithVolumes(opts.Volumes).WithIgnore(opts.Ignore) + + if opts.All { + ctrs, err := getContainersByContext(ic.ClientCtx, opts.All, opts.Ignore, namesOrIds) + if err != nil { + return nil, err + } + reports := make([]*entities.RmReport, 0, len(ctrs)) + for _, c := range ctrs { + reports = append(reports, &entities.RmReport{ + Id: c.ID, + Err: containers.Remove(ic.ClientCtx, c.ID, options), + }) + } + return reports, nil + } + + reports := make([]*entities.RmReport, 0, len(namesOrIds)) + for _, name := range namesOrIds { reports = append(reports, &entities.RmReport{ - Id: c.ID, - Err: containers.Remove(ic.ClientCtx, c.ID, options), + Id: name, + Err: containers.Remove(ic.ClientCtx, name, options), }) } + return reports, nil } @@ -601,7 +614,7 @@ func (ic *ContainerEngine) ContainerStart(ctx context.Context, namesOrIds []stri func (ic *ContainerEngine) ContainerList(ctx context.Context, opts entities.ContainerListOptions) ([]entities.ListContainer, error) { options := new(containers.ListOptions).WithFilters(opts.Filters).WithAll(opts.All).WithLast(opts.Last) - options.WithNamespace(opts.Namespace).WithSize(opts.Size).WithSync(opts.Sync) + options.WithNamespace(opts.Namespace).WithSize(opts.Size).WithSync(opts.Sync).WithExternal(opts.External) return containers.List(ic.ClientCtx, options) } diff --git a/pkg/domain/infra/tunnel/images.go b/pkg/domain/infra/tunnel/images.go index 0de756756..f10c8c175 100644 --- a/pkg/domain/infra/tunnel/images.go +++ b/pkg/domain/infra/tunnel/images.go @@ -106,8 +106,9 @@ func (ir *ImageEngine) Prune(ctx context.Context, opts entities.ImagePruneOption func (ir *ImageEngine) Pull(ctx context.Context, rawImage string, opts entities.ImagePullOptions) (*entities.ImagePullReport, error) { options := new(images.PullOptions) - options.WithAllTags(opts.AllTags).WithAuthfile(opts.Authfile).WithCertDir(opts.CertDir).WithArch(opts.Arch).WithOS(opts.OS) - options.WithVariant(opts.Variant).WithPassword(opts.Password).WithPullPolicy(opts.PullPolicy) + options.WithAllTags(opts.AllTags).WithAuthfile(opts.Authfile).WithArch(opts.Arch).WithOS(opts.OS) + options.WithVariant(opts.Variant).WithPassword(opts.Password) + options.WithQuiet(opts.Quiet).WithUsername(opts.Username) if s := opts.SkipTLSVerify; s != types.OptionalBoolUndefined { if s == types.OptionalBoolTrue { options.WithSkipTLSVerify(true) @@ -115,7 +116,6 @@ func (ir *ImageEngine) Pull(ctx context.Context, rawImage string, opts entities. 
options.WithSkipTLSVerify(false) } } - options.WithQuiet(opts.Quiet).WithSignaturePolicy(opts.SignaturePolicy).WithUsername(opts.Username) pulledImages, err := images.Pull(ir.ClientCtx, rawImage, options) if err != nil { return nil, err @@ -236,10 +236,7 @@ func (ir *ImageEngine) Import(ctx context.Context, opts entities.ImageImportOpti func (ir *ImageEngine) Push(ctx context.Context, source string, destination string, opts entities.ImagePushOptions) error { options := new(images.PushOptions) - options.WithUsername(opts.Username).WithSignaturePolicy(opts.SignaturePolicy).WithQuiet(opts.Quiet) - options.WithPassword(opts.Password).WithCertDir(opts.CertDir).WithAuthfile(opts.Authfile) - options.WithCompress(opts.Compress).WithDigestFile(opts.DigestFile).WithFormat(opts.Format) - options.WithRemoveSignatures(opts.RemoveSignatures).WithSignBy(opts.SignBy) + options.WithAll(opts.All).WithCompress(opts.Compress).WithUsername(opts.Username).WithPassword(opts.Password).WithAuthfile(opts.Authfile).WithFormat(opts.Format) if s := opts.SkipTLSVerify; s != types.OptionalBoolUndefined { if s == types.OptionalBoolTrue { diff --git a/pkg/domain/infra/tunnel/manifest.go b/pkg/domain/infra/tunnel/manifest.go index c12ba0045..e261afee2 100644 --- a/pkg/domain/infra/tunnel/manifest.go +++ b/pkg/domain/infra/tunnel/manifest.go @@ -86,10 +86,8 @@ func (ir *ImageEngine) ManifestRemove(ctx context.Context, names []string) (stri // ManifestPush pushes a manifest list or image index to the destination func (ir *ImageEngine) ManifestPush(ctx context.Context, name, destination string, opts entities.ImagePushOptions) (string, error) { options := new(images.PushOptions) - options.WithUsername(opts.Username).WithSignaturePolicy(opts.SignaturePolicy).WithQuiet(opts.Quiet) - options.WithPassword(opts.Password).WithCertDir(opts.CertDir).WithAuthfile(opts.Authfile) - options.WithCompress(opts.Compress).WithDigestFile(opts.DigestFile).WithFormat(opts.Format) - options.WithRemoveSignatures(opts.RemoveSignatures).WithSignBy(opts.SignBy) + options.WithUsername(opts.Username).WithPassword(opts.Password).WithAuthfile(opts.Authfile) + options.WithAll(opts.All) if s := opts.SkipTLSVerify; s != types.OptionalBoolUndefined { if s == types.OptionalBoolTrue { diff --git a/pkg/ps/ps.go b/pkg/ps/ps.go index dc577890a..42f9e1d39 100644 --- a/pkg/ps/ps.go +++ b/pkg/ps/ps.go @@ -69,7 +69,7 @@ func GetContainerLists(runtime *libpod.Runtime, options entities.ContainerListOp pss = append(pss, listCon) } - if options.All && options.Storage { + if options.All && options.External { externCons, err := runtime.StorageContainers() if err != nil { return nil, err diff --git a/pkg/specgen/container_validate.go b/pkg/specgen/container_validate.go index a0d36f865..81cb8b78d 100644 --- a/pkg/specgen/container_validate.go +++ b/pkg/specgen/container_validate.go @@ -30,7 +30,7 @@ func exclusiveOptions(opt1, opt2 string) error { // input for creating a container. 
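// A rough illustration of the effect of the relaxed check below (a hedged
// sketch, not verbatim podman code; "mynet" is a hypothetical, pre-existing
// CNI network): a rootless spec may now carry a static IP as long as it
// also names a network:
//
//	s := specgen.NewSpecGenerator("alpine", false)
//	s.CNINetworks = []string{"mynet"}
//	ip := net.ParseIP("10.25.30.128")
//	s.StaticIP = &ip // no longer rejected by Validate() when rootless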
func (s *SpecGenerator) Validate() error {
-	if rootless.IsRootless() {
+	if rootless.IsRootless() && len(s.CNINetworks) == 0 {
 		if s.StaticIP != nil || s.StaticIPv6 != nil {
 			return ErrNoStaticIPRootless
 		}
diff --git a/pkg/specgen/generate/kube/kube.go b/pkg/specgen/generate/kube/kube.go
index e39a700eb..0d7ee3ad2 100644
--- a/pkg/specgen/generate/kube/kube.go
+++ b/pkg/specgen/generate/kube/kube.go
@@ -3,6 +3,7 @@ package kube
 import (
 	"context"
 	"fmt"
+	"net"
 	"strings"
 
 	"github.com/containers/common/pkg/parse"
@@ -44,6 +45,32 @@ func ToPodGen(ctx context.Context, podName string, podYAML *v1.PodTemplateSpec)
 	podPorts := getPodPorts(podYAML.Spec.Containers)
 	p.PortMappings = podPorts
+	if dnsConfig := podYAML.Spec.DNSConfig; dnsConfig != nil {
+		// name servers
+		if dnsServers := dnsConfig.Nameservers; len(dnsServers) > 0 {
+			servers := make([]net.IP, 0)
+			for _, server := range dnsServers {
+				servers = append(servers, net.ParseIP(server))
+			}
+			p.DNSServer = servers
+		}
+		// search domains
+		if domains := dnsConfig.Searches; len(domains) > 0 {
+			p.DNSSearch = domains
+		}
+		// dns options
+		if options := dnsConfig.Options; len(options) > 0 {
+			dnsOptions := make([]string, 0)
+			for _, opts := range options {
+				d := opts.Name
+				if opts.Value != nil {
+					d += ":" + *opts.Value
+				}
+				dnsOptions = append(dnsOptions, d)
+			}
+			p.DNSOption = dnsOptions
+		}
+	}
 	return p, nil
 }
diff --git a/pkg/specgen/pod_validate.go b/pkg/specgen/pod_validate.go
index 7c81f3f9f..518adb32f 100644
--- a/pkg/specgen/pod_validate.go
+++ b/pkg/specgen/pod_validate.go
@@ -20,7 +20,7 @@ func exclusivePodOptions(opt1, opt2 string) error {
 
 // Validate verifies the input is valid
 func (p *PodSpecGenerator) Validate() error {
-	if rootless.IsRootless() {
+	if rootless.IsRootless() && len(p.CNINetworks) == 0 {
 		if p.StaticIP != nil {
 			return ErrNoStaticIPRootless
 		}
diff --git a/pkg/terminal/console_windows.go b/pkg/terminal/console_windows.go
index c7691857c..08e66cb3a 100644
--- a/pkg/terminal/console_windows.go
+++ b/pkg/terminal/console_windows.go
@@ -30,7 +30,7 @@ func setConsoleMode(handle windows.Handle, flags uint32) error {
 	if err := windows.SetConsoleMode(handle, mode|flags); err != nil {
 		// In similar code, it is not considered an error if we cannot set the
 		// console mode. Following same line of thinking here.
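// For context, a hedged sketch of how a caller typically drives this helper;
// the handle lookup and the flag come from golang.org/x/sys/windows:
//
//	h, _ := windows.GetStdHandle(windows.STD_OUTPUT_HANDLE)
//	_ = setConsoleMode(h, windows.ENABLE_VIRTUAL_TERMINAL_PROCESSING)
//
// The change below only demotes the log level: the CLI keeps working even
// when the console mode cannot be set.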
- logrus.WithError(err).Error("Failed to set console mode for cli") + logrus.WithError(err).Debug("Failed to set console mode for cli") } return nil diff --git a/test/apiv2/20-containers.at b/test/apiv2/20-containers.at index decdc4754..0da196e46 100644 --- a/test/apiv2/20-containers.at +++ b/test/apiv2/20-containers.at @@ -237,3 +237,12 @@ t GET containers/$cid/json 200 \ t DELETE containers/$cid 204 t DELETE images/${MultiTagName}?force=true 200 # vim: filetype=sh + +# Test Volumes field adds an anonymous volume +t POST containers/create '"Image":"'$IMAGE'","Volumes":{"/test":{}}' 201 \ + .Id~[0-9a-f]\\{64\\} +cid=$(jq -r '.Id' <<<"$output") +t GET containers/$cid/json 200 \ + .Mounts[0].Destination="/test" + +t DELETE containers/$cid?v=true 204 diff --git a/test/e2e/common_test.go b/test/e2e/common_test.go index 2668b1e7b..781bbb6d2 100644 --- a/test/e2e/common_test.go +++ b/test/e2e/common_test.go @@ -10,6 +10,7 @@ import ( "sort" "strconv" "strings" + "sync" "testing" "time" @@ -84,6 +85,7 @@ type testResultsSortedLength struct{ testResultsSorted } func (a testResultsSorted) Less(i, j int) bool { return a[i].length < a[j].length } var testResults []testResult +var testResultsMutex sync.Mutex func TestMain(m *testing.M) { if reexec.Init() { @@ -349,7 +351,9 @@ func (p *PodmanTestIntegration) InspectContainer(name string) []define.InspectCo func processTestResult(f GinkgoTestDescription) { tr := testResult{length: f.Duration.Seconds(), name: f.TestText} + testResultsMutex.Lock() testResults = append(testResults, tr) + testResultsMutex.Unlock() } func GetPortLock(port string) storage.Locker { diff --git a/test/e2e/create_staticip_test.go b/test/e2e/create_staticip_test.go index 7a2267617..698bbf976 100644 --- a/test/e2e/create_staticip_test.go +++ b/test/e2e/create_staticip_test.go @@ -49,7 +49,7 @@ var _ = Describe("Podman create with --ip flag", func() { }) It("Podman create --ip with non-allocatable IP", func() { - SkipIfRootless("--ip is not supported in rootless mode") + SkipIfRootless("--ip not supported without network in rootless mode") result := podmanTest.Podman([]string{"create", "--name", "test", "--ip", "203.0.113.124", ALPINE, "ls"}) result.WaitWithDefaultTimeout() Expect(result.ExitCode()).To(Equal(0)) @@ -63,7 +63,7 @@ var _ = Describe("Podman create with --ip flag", func() { ip := GetRandomIPAddress() result := podmanTest.Podman([]string{"create", "--name", "test", "--ip", ip, ALPINE, "ip", "addr"}) result.WaitWithDefaultTimeout() - // Rootless static ip assignment should error + // Rootless static ip assignment without network should error if rootless.IsRootless() { Expect(result.ExitCode()).To(Equal(125)) } else { @@ -81,7 +81,7 @@ var _ = Describe("Podman create with --ip flag", func() { }) It("Podman create two containers with the same IP", func() { - SkipIfRootless("--ip not supported in rootless mode") + SkipIfRootless("--ip not supported without network in rootless mode") ip := GetRandomIPAddress() result := podmanTest.Podman([]string{"create", "--name", "test1", "--ip", ip, ALPINE, "sleep", "999"}) result.WaitWithDefaultTimeout() diff --git a/test/e2e/create_staticmac_test.go b/test/e2e/create_staticmac_test.go index 1ac431da2..4c8f371a4 100644 --- a/test/e2e/create_staticmac_test.go +++ b/test/e2e/create_staticmac_test.go @@ -56,11 +56,7 @@ var _ = Describe("Podman run with --mac-address flag", func() { result := podmanTest.Podman([]string{"run", "--network", net, "--mac-address", "92:d0:c6:00:29:34", ALPINE, "ip", "addr"}) result.WaitWithDefaultTimeout() - if 
rootless.IsRootless() {
-			Expect(result.ExitCode()).To(Equal(125))
-		} else {
-			Expect(result.ExitCode()).To(Equal(0))
-			Expect(result.OutputToString()).To(ContainSubstring("92:d0:c6:00:29:34"))
-		}
+		Expect(result.ExitCode()).To(Equal(0))
+		Expect(result.OutputToString()).To(ContainSubstring("92:d0:c6:00:29:34"))
 	})
 })
diff --git a/test/e2e/create_test.go b/test/e2e/create_test.go
index 73d92e5a0..67c08ac09 100644
--- a/test/e2e/create_test.go
+++ b/test/e2e/create_test.go
@@ -553,7 +553,7 @@ var _ = Describe("Podman create", func() {
 	})
 
 	It("create container in pod with IP should fail", func() {
-		SkipIfRootless("Setting IP not supported in rootless mode")
+		SkipIfRootless("Setting IP not supported in rootless mode without network")
 		name := "createwithstaticip"
 		pod := podmanTest.RunTopContainerInPod("", "new:"+name)
 		pod.WaitWithDefaultTimeout()
@@ -565,7 +565,7 @@ var _ = Describe("Podman create", func() {
 	})
 
 	It("create container in pod with mac should fail", func() {
-		SkipIfRootless("Setting MAC Address not supported in rootless mode")
+		SkipIfRootless("Setting MAC Address not supported in rootless mode without network")
 		name := "createwithstaticmac"
 		pod := podmanTest.RunTopContainerInPod("", "new:"+name)
 		pod.WaitWithDefaultTimeout()
diff --git a/test/e2e/generate_kube_test.go b/test/e2e/generate_kube_test.go
index 239817e6c..8800f9057 100644
--- a/test/e2e/generate_kube_test.go
+++ b/test/e2e/generate_kube_test.go
@@ -540,4 +540,67 @@ var _ = Describe("Podman generate kube", func() {
 		kube.WaitWithDefaultTimeout()
 		Expect(kube.ExitCode()).ToNot(Equal(0))
 	})
+
+	It("podman generate kube on a container with dns options", func() {
+		top := podmanTest.Podman([]string{"run", "-dt", "--name", "top", "--dns", "8.8.8.8", "--dns-search", "foobar.com", "--dns-opt", "color:blue", ALPINE, "top"})
+		top.WaitWithDefaultTimeout()
+		Expect(top.ExitCode()).To(BeZero())
+
+		kube := podmanTest.Podman([]string{"generate", "kube", "top"})
+		kube.WaitWithDefaultTimeout()
+		Expect(kube.ExitCode()).To(Equal(0))
+
+		pod := new(v1.Pod)
+		err := yaml.Unmarshal(kube.Out.Contents(), pod)
+		Expect(err).To(BeNil())
+
+		Expect(StringInSlice("8.8.8.8", pod.Spec.DNSConfig.Nameservers)).To(BeTrue())
+		Expect(StringInSlice("foobar.com", pod.Spec.DNSConfig.Searches)).To(BeTrue())
+		Expect(len(pod.Spec.DNSConfig.Options)).To(BeNumerically(">", 0))
+		Expect(pod.Spec.DNSConfig.Options[0].Name).To(Equal("color"))
+		Expect(*pod.Spec.DNSConfig.Options[0].Value).To(Equal("blue"))
+	})
+
+	It("podman generate kube multiple container dns servers and options are cumulative", func() {
+		top1 := podmanTest.Podman([]string{"run", "-dt", "--name", "top1", "--dns", "8.8.8.8", "--dns-search", "foobar.com", ALPINE, "top"})
+		top1.WaitWithDefaultTimeout()
+		Expect(top1.ExitCode()).To(BeZero())
+
+		top2 := podmanTest.Podman([]string{"run", "-dt", "--name", "top2", "--dns", "8.7.7.7", "--dns-search", "homer.com", ALPINE, "top"})
+		top2.WaitWithDefaultTimeout()
+		Expect(top2.ExitCode()).To(BeZero())
+
+		kube := podmanTest.Podman([]string{"generate", "kube", "top1", "top2"})
+		kube.WaitWithDefaultTimeout()
+		Expect(kube.ExitCode()).To(Equal(0))
+
+		pod := new(v1.Pod)
+		err := yaml.Unmarshal(kube.Out.Contents(), pod)
+		Expect(err).To(BeNil())
+
+		Expect(StringInSlice("8.8.8.8", pod.Spec.DNSConfig.Nameservers)).To(BeTrue())
+		Expect(StringInSlice("8.7.7.7", pod.Spec.DNSConfig.Nameservers)).To(BeTrue())
+		Expect(StringInSlice("foobar.com", pod.Spec.DNSConfig.Searches)).To(BeTrue())
+		Expect(StringInSlice("homer.com", 
pod.Spec.DNSConfig.Searches)).To(BeTrue()) + }) + + It("podman generate kube on a pod with dns options", func() { + top := podmanTest.Podman([]string{"run", "--pod", "new:pod1", "-dt", "--name", "top", "--dns", "8.8.8.8", "--dns-search", "foobar.com", "--dns-opt", "color:blue", ALPINE, "top"}) + top.WaitWithDefaultTimeout() + Expect(top.ExitCode()).To(BeZero()) + + kube := podmanTest.Podman([]string{"generate", "kube", "pod1"}) + kube.WaitWithDefaultTimeout() + Expect(kube.ExitCode()).To(Equal(0)) + + pod := new(v1.Pod) + err := yaml.Unmarshal(kube.Out.Contents(), pod) + Expect(err).To(BeNil()) + + Expect(StringInSlice("8.8.8.8", pod.Spec.DNSConfig.Nameservers)).To(BeTrue()) + Expect(StringInSlice("foobar.com", pod.Spec.DNSConfig.Searches)).To(BeTrue()) + Expect(len(pod.Spec.DNSConfig.Options)).To(BeNumerically(">", 0)) + Expect(pod.Spec.DNSConfig.Options[0].Name).To(Equal("color")) + Expect(*pod.Spec.DNSConfig.Options[0].Value).To(Equal("blue")) + }) }) diff --git a/test/e2e/history_test.go b/test/e2e/history_test.go index fea3f4d43..1c57c60de 100644 --- a/test/e2e/history_test.go +++ b/test/e2e/history_test.go @@ -65,6 +65,23 @@ var _ = Describe("Podman history", func() { session.WaitWithDefaultTimeout() Expect(session.ExitCode()).To(Equal(0)) Expect(len(session.OutputToStringArray())).To(BeNumerically(">", 0)) + + session = podmanTest.Podman([]string{"history", "--no-trunc", "--format", "{{.ID}}", ALPINE}) + session.WaitWithDefaultTimeout() + Expect(session.ExitCode()).To(Equal(0)) + lines := session.OutputToStringArray() + Expect(len(lines)).To(BeNumerically(">", 0)) + // the image id must be 64 chars long + Expect(len(lines[0])).To(BeNumerically("==", 64)) + + session = podmanTest.Podman([]string{"history", "--no-trunc", "--format", "{{.CreatedBy}}", ALPINE}) + session.WaitWithDefaultTimeout() + Expect(session.ExitCode()).To(Equal(0)) + lines = session.OutputToStringArray() + Expect(len(lines)).To(BeNumerically(">", 0)) + Expect(session.OutputToString()).ToNot(ContainSubstring("...")) + // the second line in the alpine history contains a command longer than 45 chars + Expect(len(lines[1])).To(BeNumerically(">", 45)) }) It("podman history with json flag", func() { diff --git a/test/e2e/inspect_test.go b/test/e2e/inspect_test.go index 97f77414e..8fc9721f9 100644 --- a/test/e2e/inspect_test.go +++ b/test/e2e/inspect_test.go @@ -443,4 +443,27 @@ var _ = Describe("Podman inspect", func() { Expect(inspect.OutputToString()).To(Equal(`"{"80/tcp":[{"HostIp":"","HostPort":"8080"}]}"`)) }) + It("Verify container inspect has default network", func() { + SkipIfRootless("Requires root CNI networking") + ctrName := "testctr" + session := podmanTest.Podman([]string{"run", "-d", "--name", ctrName, ALPINE, "top"}) + session.WaitWithDefaultTimeout() + Expect(session.ExitCode()).To(BeZero()) + + inspect := podmanTest.InspectContainer(ctrName) + Expect(len(inspect)).To(Equal(1)) + Expect(len(inspect[0].NetworkSettings.Networks)).To(Equal(1)) + }) + + It("Verify stopped container still has default network in inspect", func() { + SkipIfRootless("Requires root CNI networking") + ctrName := "testctr" + session := podmanTest.Podman([]string{"create", "--name", ctrName, ALPINE, "top"}) + session.WaitWithDefaultTimeout() + Expect(session.ExitCode()).To(BeZero()) + + inspect := podmanTest.InspectContainer(ctrName) + Expect(len(inspect)).To(Equal(1)) + Expect(len(inspect[0].NetworkSettings.Networks)).To(Equal(1)) + }) }) diff --git a/test/e2e/network_test.go b/test/e2e/network_test.go index e2080244b..2f5290c76 
100644 --- a/test/e2e/network_test.go +++ b/test/e2e/network_test.go @@ -408,7 +408,6 @@ var _ = Describe("Podman network", func() { Expect(lines[1]).To(Equal(netName2)) }) It("podman network with multiple aliases", func() { - Skip("Until DNSName is updated on our CI images") var worked bool netName := "aliasTest" + stringid.GenerateNonCryptoID() session := podmanTest.Podman([]string{"network", "create", netName}) diff --git a/test/e2e/pod_create_test.go b/test/e2e/pod_create_test.go index be0a2f6f0..9c448a81e 100644 --- a/test/e2e/pod_create_test.go +++ b/test/e2e/pod_create_test.go @@ -233,7 +233,7 @@ var _ = Describe("Podman pod create", func() { ip := GetRandomIPAddress() podCreate := podmanTest.Podman([]string{"pod", "create", "--ip", ip, "--name", name}) podCreate.WaitWithDefaultTimeout() - // Rootless should error + // Rootless should error without network if rootless.IsRootless() { Expect(podCreate.ExitCode()).To(Equal(125)) } else { @@ -246,7 +246,7 @@ var _ = Describe("Podman pod create", func() { }) It("podman container in pod with IP address shares IP address", func() { - SkipIfRootless("Rootless does not support --ip") + SkipIfRootless("Rootless does not support --ip without network") podName := "test" ctrName := "testCtr" ip := GetRandomIPAddress() diff --git a/test/e2e/pod_inspect_test.go b/test/e2e/pod_inspect_test.go index 25212991d..fd9589afe 100644 --- a/test/e2e/pod_inspect_test.go +++ b/test/e2e/pod_inspect_test.go @@ -101,7 +101,7 @@ var _ = Describe("Podman pod inspect", func() { }) It("podman pod inspect outputs show correct MAC", func() { - SkipIfRootless("--mac-address is not supported in rootless mode") + SkipIfRootless("--mac-address is not supported in rootless mode without network") podName := "testPod" macAddr := "42:43:44:00:00:01" create := podmanTest.Podman([]string{"pod", "create", "--name", podName, "--mac-address", macAddr}) diff --git a/test/e2e/ps_test.go b/test/e2e/ps_test.go index 13701fc3b..d12534219 100644 --- a/test/e2e/ps_test.go +++ b/test/e2e/ps_test.go @@ -396,11 +396,14 @@ var _ = Describe("Podman ps", func() { session.WaitWithDefaultTimeout() Expect(session.ExitCode()).To(Equal(0)) - session = podmanTest.Podman([]string{"ps", "--pod", "--no-trunc"}) - + session = podmanTest.Podman([]string{"ps", "--no-trunc"}) session.WaitWithDefaultTimeout() Expect(session.ExitCode()).To(Equal(0)) + Expect(session.OutputToString()).To(Not(ContainSubstring(podid))) + session = podmanTest.Podman([]string{"ps", "--pod", "--no-trunc"}) + session.WaitWithDefaultTimeout() + Expect(session.ExitCode()).To(Equal(0)) Expect(session.OutputToString()).To(ContainSubstring(podid)) }) diff --git a/test/e2e/pull_test.go b/test/e2e/pull_test.go index 4b73004da..d47a3e47a 100644 --- a/test/e2e/pull_test.go +++ b/test/e2e/pull_test.go @@ -522,4 +522,31 @@ var _ = Describe("Podman pull", func() { Expect(data[0].Os).To(Equal(runtime.GOOS)) Expect(data[0].Architecture).To(Equal("arm64")) }) + + It("podman pull --arch", func() { + session := podmanTest.Podman([]string{"pull", "--arch=bogus", ALPINE}) + session.WaitWithDefaultTimeout() + Expect(session.ExitCode()).To(Equal(125)) + expectedError := "no image found in manifest list for architecture bogus" + Expect(session.ErrorToString()).To(ContainSubstring(expectedError)) + + session = podmanTest.Podman([]string{"pull", "--arch=arm64", "--os", "windows", ALPINE}) + session.WaitWithDefaultTimeout() + Expect(session.ExitCode()).To(Equal(125)) + expectedError = "no image found in manifest list for architecture" + 
Expect(session.ErrorToString()).To(ContainSubstring(expectedError)) + + session = podmanTest.Podman([]string{"pull", "-q", "--arch=arm64", ALPINE}) + session.WaitWithDefaultTimeout() + Expect(session.ExitCode()).To(Equal(0)) + + setup := podmanTest.Podman([]string{"image", "inspect", session.OutputToString()}) + setup.WaitWithDefaultTimeout() + Expect(setup.ExitCode()).To(Equal(0)) + + data := setup.InspectImageJSON() // returns []inspect.ImageData + Expect(len(data)).To(Equal(1)) + Expect(data[0].Os).To(Equal(runtime.GOOS)) + Expect(data[0].Architecture).To(Equal("arm64")) + }) }) diff --git a/test/e2e/push_test.go b/test/e2e/push_test.go index 922995060..00b5802a3 100644 --- a/test/e2e/push_test.go +++ b/test/e2e/push_test.go @@ -54,10 +54,16 @@ var _ = Describe("Podman push", func() { fmt.Sprintf("dir:%s", bbdir)}) session.WaitWithDefaultTimeout() Expect(session.ExitCode()).To(Equal(0)) + + bbdir = filepath.Join(podmanTest.TempDir, "busybox") + session = podmanTest.Podman([]string{"push", "--format", "oci", ALPINE, + fmt.Sprintf("dir:%s", bbdir)}) + session.WaitWithDefaultTimeout() + Expect(session.ExitCode()).To(Equal(0)) }) It("podman push to local registry", func() { - SkipIfRemote("FIXME: This should work") + SkipIfRemote("Remote does not support --digestfile or --remove-signatures") if podmanTest.Host.Arch == "ppc64le" { Skip("No registry image for ppc64le") } @@ -74,7 +80,7 @@ var _ = Describe("Podman push", func() { Skip("Cannot start docker registry.") } - push := podmanTest.Podman([]string{"push", "--tls-verify=false", "--remove-signatures", ALPINE, "localhost:5000/my-alpine"}) + push := podmanTest.Podman([]string{"push", "-q", "--tls-verify=false", "--remove-signatures", ALPINE, "localhost:5000/my-alpine"}) push.WaitWithDefaultTimeout() Expect(push.ExitCode()).To(Equal(0)) @@ -88,7 +94,6 @@ var _ = Describe("Podman push", func() { }) It("podman push to local registry with authorization", func() { - SkipIfRemote("FIXME: This does not seem to be returning an error") SkipIfRootless("FIXME: Creating content in certs.d we use directories in homedir") if podmanTest.Host.Arch == "ppc64le" { Skip("No registry image for ppc64le") @@ -140,7 +145,7 @@ var _ = Describe("Podman push", func() { session = podmanTest.Podman([]string{"logs", "registry"}) session.WaitWithDefaultTimeout() - push := podmanTest.Podman([]string{"push", "--creds=podmantest:test", ALPINE, "localhost:5000/tlstest"}) + push := podmanTest.Podman([]string{"push", "--format=v2s2", "--creds=podmantest:test", ALPINE, "localhost:5000/tlstest"}) push.WaitWithDefaultTimeout() Expect(push).To(ExitWithError()) @@ -155,9 +160,12 @@ var _ = Describe("Podman push", func() { push.WaitWithDefaultTimeout() Expect(push).To(ExitWithError()) - push = podmanTest.Podman([]string{"push", "--creds=podmantest:test", "--cert-dir=fakedir", ALPINE, "localhost:5000/certdirtest"}) - push.WaitWithDefaultTimeout() - Expect(push).To(ExitWithError()) + if !IsRemote() { + // remote does not support --cert-dir + push = podmanTest.Podman([]string{"push", "--creds=podmantest:test", "--cert-dir=fakedir", ALPINE, "localhost:5000/certdirtest"}) + push.WaitWithDefaultTimeout() + Expect(push).To(ExitWithError()) + } push = podmanTest.Podman([]string{"push", "--creds=podmantest:test", ALPINE, "localhost:5000/defaultflags"}) push.WaitWithDefaultTimeout() diff --git a/test/e2e/rm_test.go b/test/e2e/rm_test.go index ca142d7f3..4c50a61ef 100644 --- a/test/e2e/rm_test.go +++ b/test/e2e/rm_test.go @@ -132,7 +132,7 @@ var _ = Describe("Podman rm", func() { latest := 
"-l" if IsRemote() { - latest = "test1" + latest = cid } result := podmanTest.Podman([]string{"rm", latest}) result.WaitWithDefaultTimeout() diff --git a/test/e2e/run_networking_test.go b/test/e2e/run_networking_test.go index cbaae7186..ebea2132a 100644 --- a/test/e2e/run_networking_test.go +++ b/test/e2e/run_networking_test.go @@ -621,7 +621,6 @@ var _ = Describe("Podman run networking", func() { }) It("podman run in custom CNI network with --static-ip", func() { - SkipIfRootless("Rootless mode does not support --ip") netName := stringid.GenerateNonCryptoID() ipAddr := "10.25.30.128" create := podmanTest.Podman([]string{"network", "create", "--subnet", "10.25.30.0/24", netName}) @@ -633,10 +632,6 @@ var _ = Describe("Podman run networking", func() { run.WaitWithDefaultTimeout() Expect(run.ExitCode()).To(BeZero()) Expect(run.OutputToString()).To(ContainSubstring(ipAddr)) - - create = podmanTest.Podman([]string{"network", "rm", netName}) - create.WaitWithDefaultTimeout() - Expect(create.ExitCode()).To(BeZero()) }) It("podman rootless fails custom CNI network with --uidmap", func() { @@ -658,7 +653,6 @@ var _ = Describe("Podman run networking", func() { }) It("podman run with new:pod and static-ip", func() { - SkipIfRootless("Rootless does not support --ip") netName := stringid.GenerateNonCryptoID() ipAddr := "10.25.40.128" podname := "testpod" diff --git a/test/e2e/run_staticip_test.go b/test/e2e/run_staticip_test.go index 8383b1812..aeb462ae9 100644 --- a/test/e2e/run_staticip_test.go +++ b/test/e2e/run_staticip_test.go @@ -19,7 +19,7 @@ var _ = Describe("Podman run with --ip flag", func() { ) BeforeEach(func() { - SkipIfRootless("rootless does not support --ip") + SkipIfRootless("rootless does not support --ip without network") tempdir, err = CreateTempDirInTempDir() if err != nil { os.Exit(1) diff --git a/test/system/040-ps.bats b/test/system/040-ps.bats index 0ae8b0ce0..ae27c479f 100644 --- a/test/system/040-ps.bats +++ b/test/system/040-ps.bats @@ -82,11 +82,10 @@ load helpers run_podman rm -a } -@test "podman ps -a --storage" { - skip_if_remote "ps --storage does not work over remote" +@test "podman ps -a --external" { # Setup: ensure that we have no hidden storage containers - run_podman ps --storage -a + run_podman ps --external -a is "${#lines[@]}" "1" "setup check: no storage containers at start of test" # Force a buildah timeout; this leaves a buildah container behind @@ -98,18 +97,18 @@ EOF run_podman ps -a is "${#lines[@]}" "1" "podman ps -a does not see buildah container" - run_podman ps --storage -a - is "${#lines[@]}" "2" "podman ps -a --storage sees buildah container" + run_podman ps --external -a + is "${#lines[@]}" "2" "podman ps -a --external sees buildah container" is "${lines[1]}" \ "[0-9a-f]\{12\} \+$IMAGE *buildah .* seconds ago .* storage .* ${PODMAN_TEST_IMAGE_NAME}-working-container" \ - "podman ps --storage" + "podman ps --external" cid="${lines[1]:0:12}" # 'rm -a' should be a NOP run_podman rm -a - run_podman ps --storage -a - is "${#lines[@]}" "2" "podman ps -a --storage sees buildah container" + run_podman ps --external -a + is "${#lines[@]}" "2" "podman ps -a --external sees buildah container" # We can't rm it without -f, but podman should issue a helpful message run_podman 2 rm "$cid" @@ -118,7 +117,7 @@ EOF # With -f, we can remove it. 
run_podman rm -f "$cid" - run_podman ps --storage -a + run_podman ps --external -a is "${#lines[@]}" "1" "storage container has been removed" } diff --git a/test/system/075-exec.bats b/test/system/075-exec.bats index c028e16c9..badf44c49 100644 --- a/test/system/075-exec.bats +++ b/test/system/075-exec.bats @@ -6,8 +6,6 @@ load helpers @test "podman exec - basic test" { - skip_if_remote "FIXME: pending #7241" - rand_filename=$(random_string 20) rand_content=$(random_string 50) diff --git a/test/system/150-login.bats b/test/system/150-login.bats index 5151ab0e1..c3af63348 100644 --- a/test/system/150-login.bats +++ b/test/system/150-login.bats @@ -197,6 +197,7 @@ EOF destname=ok-$(random_string 10 | tr A-Z a-z)-ok # Use command-line credentials run_podman push --tls-verify=false \ + --format docker \ --creds ${PODMAN_LOGIN_USER}:${PODMAN_LOGIN_PASS} \ $IMAGE localhost:${PODMAN_LOGIN_REGISTRY_PORT}/$destname diff --git a/vendor/github.com/containers/buildah/CHANGELOG.md b/vendor/github.com/containers/buildah/CHANGELOG.md index 25f02db19..0ad3069ce 100644 --- a/vendor/github.com/containers/buildah/CHANGELOG.md +++ b/vendor/github.com/containers/buildah/CHANGELOG.md @@ -2,6 +2,24 @@ # Changelog +## v1.19.3 (2021-01-28) + [ci:docs] Fix man page for buildah push + Vendor in containers/image v5.10.1 + Rebuild layer if a change in ARG is detected + Bump golang.org/x/crypto to latest rel-1.19 + local image lookup by digest + Use build-arg ENV val from local environment if set + Pick default OCI Runtime from containers.conf + +## v1.19.2 (2021-01-15) + If overlay mount point destination does not exists, do not throw error + Vendor in containers/common + +## v1.19.1 (2021-01-14) + Cherry pick localhost fix and update CI configuration for release-1.19 + use local image name for pull policy checks + Vendor in common 0.33.1 + ## v1.19.0 (2021-01-08) Update vendor of containers/storage and containers/common Buildah inspect should be able to inspect manifests diff --git a/vendor/github.com/containers/buildah/buildah.go b/vendor/github.com/containers/buildah/buildah.go index 89fc860dd..4fbc475c2 100644 --- a/vendor/github.com/containers/buildah/buildah.go +++ b/vendor/github.com/containers/buildah/buildah.go @@ -28,7 +28,7 @@ const ( Package = "buildah" // Version for the Package. Bump version in contrib/rpm/buildah.spec // too. - Version = "1.19.2" + Version = "1.19.3" // The value we use to identify what type of information, currently a // serialized Builder structure, we are using as per-container state. 
// This should only be changed when we make incompatible changes to diff --git a/vendor/github.com/containers/buildah/changelog.txt b/vendor/github.com/containers/buildah/changelog.txt index ce2f2696f..db2faf71a 100644 --- a/vendor/github.com/containers/buildah/changelog.txt +++ b/vendor/github.com/containers/buildah/changelog.txt @@ -1,3 +1,21 @@ +- Changelog for v1.19.3 (2021-01-28) + * [ci:docs] Fix man page for buildah push + * Vendor in containers/image v5.10.1 + * Rebuild layer if a change in ARG is detected + * Bump golang.org/x/crypto to latest rel-1.19 + * local image lookup by digest + * Use build-arg ENV val from local environment if set + * Pick default OCI Runtime from containers.conf + +- Changelog for v1.19.2 (2021-01-15) + * If overlay mount point destination does not exists, do not throw error + * Vendor in containers/common + +- Changelog for v1.19.1 (2021-01-14) + * Cherry pick localhost fix and update CI configuration for release-1.19 + * use local image name for pull policy checks + * Vendor in common 0.33.1 + - Changelog for v1.19.0 (2021-01-08) * Update vendor of containers/storage and containers/common * Buildah inspect should be able to inspect manifests diff --git a/vendor/github.com/containers/buildah/go.mod b/vendor/github.com/containers/buildah/go.mod index 135926116..cccf42895 100644 --- a/vendor/github.com/containers/buildah/go.mod +++ b/vendor/github.com/containers/buildah/go.mod @@ -6,7 +6,7 @@ require ( github.com/containerd/containerd v1.4.1 // indirect github.com/containernetworking/cni v0.7.2-0.20190904153231-83439463f784 github.com/containers/common v0.33.1 - github.com/containers/image/v5 v5.9.0 + github.com/containers/image/v5 v5.10.1 github.com/containers/ocicrypt v1.0.3 github.com/containers/storage v1.24.5 github.com/docker/distribution v2.7.1+incompatible @@ -33,12 +33,12 @@ require ( github.com/sirupsen/logrus v1.7.0 github.com/spf13/cobra v1.1.1 github.com/spf13/pflag v1.0.5 - github.com/stretchr/testify v1.6.1 + github.com/stretchr/testify v1.7.0 github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2 go.etcd.io/bbolt v1.3.5 - golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 + golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a - golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3 + golang.org/x/sys v0.0.0-20201218084310-7d0127a74742 gotest.tools/v3 v3.0.3 // indirect k8s.io/klog v1.0.0 // indirect ) diff --git a/vendor/github.com/containers/buildah/go.sum b/vendor/github.com/containers/buildah/go.sum index 6a5f70a36..bf796c496 100644 --- a/vendor/github.com/containers/buildah/go.sum +++ b/vendor/github.com/containers/buildah/go.sum @@ -82,6 +82,8 @@ github.com/containers/common v0.33.1 h1:XpDiq8Cta8+u1s4kpYSEWdB140ZmqgyIXfWkLqKx github.com/containers/common v0.33.1/go.mod h1:mjDo/NKeweL/onaspLhZ38WnHXaYmrELHclIdvSnYpY= github.com/containers/image/v5 v5.9.0 h1:dRmUtcluQcmasNo3DpnRoZjfU0rOu1qZeL6wlDJr10Q= github.com/containers/image/v5 v5.9.0/go.mod h1:blOEFd/iFdeyh891ByhCVUc+xAcaI3gBegXECwz9UbQ= +github.com/containers/image/v5 v5.10.1 h1:tHhGQ8RCMxJfJLD/PEW1qrOKX8nndledW9qz6UiAxns= +github.com/containers/image/v5 v5.10.1/go.mod h1:JlRLJZv7elVbtHaaaR6Kz8i6G3k2ttj4t7fubwxD9Hs= github.com/containers/libtrust v0.0.0-20190913040956-14b96171aa3b h1:Q8ePgVfHDplZ7U33NwHZkrVELsZP5fYj9pM5WBZB2GE= github.com/containers/libtrust v0.0.0-20190913040956-14b96171aa3b/go.mod h1:9rfv8iPl1ZP7aqh9YA68wnZv2NUDbXdcdPHVz0pFbPY= github.com/containers/ocicrypt v1.0.3 
h1:vYgl+RZ9Q3DPMuTfxmN+qp0X2Bj52uuY2vnt6GzVe1c= @@ -238,6 +240,8 @@ github.com/klauspost/compress v1.11.3 h1:dB4Bn0tN3wdCzQxnS8r06kV74qN/TAfaIS0bVE8 github.com/klauspost/compress v1.11.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= github.com/klauspost/compress v1.11.5 h1:xNCE0uE6yvTPRS+0wGNMHPo3NIpwnk6aluQZ6R6kRcc= github.com/klauspost/compress v1.11.5/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= +github.com/klauspost/compress v1.11.7 h1:0hzRabrMN4tSTvMfnL3SCv1ZGeAP23ynzodBgaHeMeg= +github.com/klauspost/compress v1.11.7/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= github.com/klauspost/pgzip v1.2.5 h1:qnWYvvKqedOF2ulHpMG72XQol4ILEJ8k2wwRl/Km8oE= github.com/klauspost/pgzip v1.2.5/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs= github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk= @@ -404,6 +408,8 @@ github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UV github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2 h1:b6uOv7YOFK0TYG7HtkIgExQo+2RdLuwRft63jn2HWj8= github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= @@ -412,6 +418,8 @@ github.com/tchap/go-patricia v2.3.0+incompatible/go.mod h1:bmLyhP68RS6kStMGxByiQ github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/ulikunitz/xz v0.5.8 h1:ERv8V6GKqVi23rgu5cj9pVfVzJbOqAY2Ntl88O6c2nQ= github.com/ulikunitz/xz v0.5.8/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= +github.com/ulikunitz/xz v0.5.9 h1:RsKRIA2MO8x56wkkcd3LbtcE/uMszhb6DpRf+3uwa3I= +github.com/ulikunitz/xz v0.5.9/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= github.com/urfave/cli v0.0.0-20171014202726-7bc6a0acffa5/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= @@ -419,6 +427,8 @@ github.com/vbatts/tar-split v0.11.1 h1:0Odu65rhcZ3JZaPHxl7tCI3V/C/Q9Zf82UFravl02 github.com/vbatts/tar-split v0.11.1/go.mod h1:LEuURwDEiWjRjwu46yU3KVGuUdVv/dcnpcEPSzR8z6g= github.com/vbauerster/mpb/v5 v5.3.0 h1:vgrEJjUzHaSZKDRRxul5Oh4C72Yy/5VEMb0em+9M0mQ= github.com/vbauerster/mpb/v5 v5.3.0/go.mod h1:4yTkvAb8Cm4eylAp6t0JRq6pXDkFJ4krUlDqWYkakAs= +github.com/vbauerster/mpb/v5 v5.4.0 h1:n8JPunifvQvh6P1D1HAl2Ur9YcmKT1tpoUuiea5mlmg= +github.com/vbauerster/mpb/v5 v5.4.0/go.mod h1:fi4wVo7BVQ22QcvFObm+VwliQXlV1eBT8JDaKXR4JGI= github.com/vishvananda/netlink v1.1.0 h1:1iyaYNBLmP6L0220aDnYQpo1QEV4t4hJ+xEEhhJH8j0= github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE= github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df h1:OviZH7qLw/7ZovXvuNyL3XQl8UFofeikI1NW1Gypu7k= @@ -456,6 +466,8 @@ golang.org/x/crypto v0.0.0-20200423211502-4bdfaf469ed5/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto 
v0.0.0-20200429183012-4b2356b1ed79/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 h1:psW17arqaxU48Z5kZ0CQnkZWQJsqcURM6tKiBApRjXI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad h1:DN0cp81fZ3njFcrLCytUHRSUkqBjfTo4Tx9RJTWs0EY= +golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -527,6 +539,7 @@ golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191022100944-742c48ecaeb7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191115151921-52ab43148777/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200120151820-655fe14d7479/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -543,6 +556,10 @@ golang.org/x/sys v0.0.0-20200909081042-eff7692f9009/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3 h1:kzM6+9dur93BcC2kVlYl34cHU+TYZLanmpSJHVMmL64= golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201218084310-7d0127a74742 h1:+CBz4km/0KPU3RGTwARGh/noP3bEwtHcq+0YcBQM2JQ= +golang.org/x/sys v0.0.0-20201218084310-7d0127a74742/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/term v0.0.0-20201117132131-f5c789dd3221 h1:/ZHdbVpdR/jk3g30/d4yUL0JU9kksj8+F/bnQUVLGDM= +golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= diff --git a/vendor/github.com/containers/buildah/imagebuildah/stage_executor.go b/vendor/github.com/containers/buildah/imagebuildah/stage_executor.go index 191645b89..9c15785bc 100644 --- a/vendor/github.com/containers/buildah/imagebuildah/stage_executor.go +++ b/vendor/github.com/containers/buildah/imagebuildah/stage_executor.go @@ -834,7 +834,11 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string, // Check if there's already an image based on our parent that // has the same change that we're about to make, so far as we // can tell. 
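// A hedged aside on the getCreatedBy() change further down in this hunk:
// with the new ARG case, a Dockerfile step such as `ARG VERSION=1.0`
// (a hypothetical example) is recorded in the image history roughly as
//
//	/bin/sh -c #(nop) ARG VERSION=1.0
//
// so cached layers built with different ARG values no longer collide.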
-		if checkForLayers {
+		// Only do this if there were no build args given by the user; we need
+		// to call ib.Run() to correctly put the args together before determining
+		// whether a cached layer with the same build args already exists, and
+		// that is done in the if block below.
+		if checkForLayers && s.builder.Args == nil {
 			cacheID, err = s.intermediateImageExists(ctx, node, addedContentSummary, s.stepRequiresLayer(step))
 			if err != nil {
 				return "", nil, errors.Wrap(err, "error checking if cached image exists from a previous build")
 			}
@@ -1022,6 +1026,9 @@ func (s *StageExecutor) getCreatedBy(node *parser.Node, addedContentSummary stri
 		return "/bin/sh"
 	}
 	switch strings.ToUpper(node.Value) {
+	case "ARG":
+		buildArgs := s.getBuildArgs()
+		return "/bin/sh -c #(nop) ARG " + buildArgs
 	case "RUN":
 		buildArgs := s.getBuildArgs()
 		if buildArgs != "" {
diff --git a/vendor/github.com/containers/buildah/new.go b/vendor/github.com/containers/buildah/new.go
index 4d70e0146..2ee86dd13 100644
--- a/vendor/github.com/containers/buildah/new.go
+++ b/vendor/github.com/containers/buildah/new.go
@@ -150,10 +150,10 @@ func resolveImage(ctx context.Context, systemContext *types.SystemContext, store
 		return nil, "", nil, err
 	}
 
-	// If we could resolve the image locally, check if it was referenced by
-	// ID. In that case, we don't need to bother any further and can
-	// prevent prompting the user.
-	if localImage != nil && strings.HasPrefix(localImage.ID, options.FromImage) {
+	// If we could resolve the image locally, check if it was clearly
+	// referring to a local image, either by ID or digest. In that case,
+	// we don't need to perform a remote lookup.
+	if localImage != nil && (strings.HasPrefix(localImage.ID, options.FromImage) || strings.HasPrefix(options.FromImage, "sha256:")) {
 		return localImageRef, localImageRef.Transport().Name(), localImage, nil
 	}
diff --git a/vendor/github.com/containers/buildah/util/types.go b/vendor/github.com/containers/buildah/util/types.go
index dc5f4b6c8..ca0f31532 100644
--- a/vendor/github.com/containers/buildah/util/types.go
+++ b/vendor/github.com/containers/buildah/util/types.go
@@ -1,7 +1,7 @@ package util
 
 const (
-	// DefaultRuntime is the default command to use to run the container.
+	// Deprecated: Default runtime should come from containers.conf
 	DefaultRuntime = "runc"
 	// DefaultCNIPluginPath is the default location of CNI plugin helpers.
 	DefaultCNIPluginPath = "/usr/libexec/cni:/opt/cni/bin"
diff --git a/vendor/github.com/containers/buildah/util/util.go b/vendor/github.com/containers/buildah/util/util.go
index 99f68d9e1..338c4503a 100644
--- a/vendor/github.com/containers/buildah/util/util.go
+++ b/vendor/github.com/containers/buildah/util/util.go
@@ -20,6 +20,7 @@ import (
 	"github.com/containers/image/v5/types"
 	"github.com/containers/storage"
 	"github.com/docker/distribution/registry/api/errcode"
+	"github.com/opencontainers/go-digest"
 	specs "github.com/opencontainers/runtime-spec/specs-go"
 	"github.com/pkg/errors"
 	"github.com/sirupsen/logrus"
@@ -68,6 +69,19 @@ func ResolveName(name string, firstRegistry string, sc *types.SystemContext, sto
 			return []string{img.ID}, "", false, nil
 		}
 	}
+	// If we're referring to an image by digest, it *must* be local and we
+	// should not have any fall through/back logic.
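// A hedged sketch of the behavior added just below ("dgst", "systemContext"
// and "store" are assumed to be in scope): a caller passing a digest now
// gets a purely local answer or an error, and no registry is consulted:
//
//	names, _, _, err := util.ResolveName("sha256:"+dgst.Encoded(), "", systemContext, store)
//	// err != nil when no image in the local store has that digest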
+ if strings.HasPrefix(name, "sha256:") { + d, err := digest.Parse(name) + if err != nil { + return nil, "", false, err + } + img, err := store.Image(d.Encoded()) + if err != nil { + return nil, "", false, err + } + return []string{img.ID}, "", false, nil + } // Transports are not supported for local image look ups. srcRef, err := alltransports.ParseImageName(name) @@ -263,7 +277,12 @@ func Runtime() string { return "crun" } - return DefaultRuntime + conf, err := config.Default() + if err != nil { + logrus.Warnf("Error loading container config when searching for local runtime: %v", err) + return DefaultRuntime + } + return conf.Engine.OCIRuntime } // StringInSlice returns a boolean indicating if the exact value s is present diff --git a/vendor/github.com/containers/image/v5/copy/copy.go b/vendor/github.com/containers/image/v5/copy/copy.go index 485db4d30..b5c755e18 100644 --- a/vendor/github.com/containers/image/v5/copy/copy.go +++ b/vendor/github.com/containers/image/v5/copy/copy.go @@ -14,6 +14,7 @@ import ( "github.com/containers/image/v5/docker/reference" "github.com/containers/image/v5/image" + internalblobinfocache "github.com/containers/image/v5/internal/blobinfocache" "github.com/containers/image/v5/internal/pkg/platform" "github.com/containers/image/v5/manifest" "github.com/containers/image/v5/pkg/blobinfocache" @@ -47,7 +48,7 @@ var ( // maxParallelDownloads is used to limit the maxmimum number of parallel // downloads. Let's follow Firefox by limiting it to 6. - maxParallelDownloads = 6 + maxParallelDownloads = uint(6) ) // compressionBufferSize is the buffer size used to compress a blob @@ -107,18 +108,19 @@ func (d *digestingReader) Read(p []byte) (int, error) { // copier allows us to keep track of diffID values for blobs, and other // data shared across one or more images in a possible manifest list. type copier struct { - dest types.ImageDestination - rawSource types.ImageSource - reportWriter io.Writer - progressOutput io.Writer - progressInterval time.Duration - progress chan types.ProgressProperties - blobInfoCache types.BlobInfoCache - copyInParallel bool - compressionFormat compression.Algorithm - compressionLevel *int - ociDecryptConfig *encconfig.DecryptConfig - ociEncryptConfig *encconfig.EncryptConfig + dest types.ImageDestination + rawSource types.ImageSource + reportWriter io.Writer + progressOutput io.Writer + progressInterval time.Duration + progress chan types.ProgressProperties + blobInfoCache internalblobinfocache.BlobInfoCache2 + copyInParallel bool + compressionFormat compression.Algorithm + compressionLevel *int + ociDecryptConfig *encconfig.DecryptConfig + ociEncryptConfig *encconfig.EncryptConfig + maxParallelDownloads uint } // imageCopier tracks state specific to a single image (possibly an item of a manifest list) @@ -190,6 +192,8 @@ type Options struct { // OciDecryptConfig contains the config that can be used to decrypt an image if it is // encrypted if non-nil. If nil, it does not attempt to decrypt an image. OciDecryptConfig *encconfig.DecryptConfig + // MaxParallelDownloads indicates the maximum layers to pull at the same time. A reasonable default is used if this is left as 0. + MaxParallelDownloads uint } // validateImageListSelection returns an error if the passed-in value is not one that we recognize as a valid ImageListSelection value @@ -265,9 +269,10 @@ func Image(ctx context.Context, policyContext *signature.PolicyContext, destRef, // FIXME? 
The cache is used for sources and destinations equally, but we only have a SourceCtx and DestinationCtx. // For now, use DestinationCtx (because blob reuse changes the behavior of the destination side more); eventually // we might want to add a separate CommonCtx — or would that be too confusing? - blobInfoCache: blobinfocache.DefaultCache(options.DestinationCtx), - ociDecryptConfig: options.OciDecryptConfig, - ociEncryptConfig: options.OciEncryptConfig, + blobInfoCache: internalblobinfocache.FromBlobInfoCache(blobinfocache.DefaultCache(options.DestinationCtx)), + ociDecryptConfig: options.OciDecryptConfig, + ociEncryptConfig: options.OciEncryptConfig, + maxParallelDownloads: options.MaxParallelDownloads, } // Default to using gzip compression unless specified otherwise. if options.DestinationCtx == nil || options.DestinationCtx.CompressionFormat == nil { @@ -648,13 +653,19 @@ func (c *copier) copyOneImage(ctx context.Context, policyContext *signature.Poli // With docker/distribution registries we do not know whether the registry accepts schema2 or schema1 only; // and at least with the OpenShift registry "acceptschema2" option, there is no way to detect the support // without actually trying to upload something and getting a types.ManifestTypeRejectedError. - // So, try the preferred manifest MIME type. If the process succeeds, fine… + // So, try the preferred manifest MIME type with possibly-updated blob digests, media types, and sizes if + // we're altering how they're compressed. If the process succeeds, fine… manifestBytes, retManifestDigest, err := ic.copyUpdatedConfigAndManifest(ctx, targetInstance) retManifestType = preferredManifestMIMEType if err != nil { logrus.Debugf("Writing manifest using preferred type %s failed: %v", preferredManifestMIMEType, err) - // … if it fails, _and_ the failure is because the manifest is rejected, we may have other options. - if _, isManifestRejected := errors.Cause(err).(types.ManifestTypeRejectedError); !isManifestRejected || len(otherManifestMIMETypeCandidates) == 0 { + // … if it fails, and the failure is either because the manifest is rejected by the registry, or + // because we failed to create a manifest of the specified type because the specific manifest type + // doesn't support the type of compression we're trying to use (e.g. docker v2s2 and zstd), we may + // have other options available that could still succeed. + _, isManifestRejected := errors.Cause(err).(types.ManifestTypeRejectedError) + _, isCompressionIncompatible := errors.Cause(err).(manifest.ManifestLayerCompressionIncompatibilityError) + if (!isManifestRejected && !isCompressionIncompatible) || len(otherManifestMIMETypeCandidates) == 0 { // We don’t have other options. // In principle the code below would handle this as well, but the resulting error message is fairly ugly. // Don’t bother the user with MIME types if we have no choice. @@ -809,7 +820,11 @@ func (ic *imageCopier) copyLayers(ctx context.Context) error { // avoid malicious images causing troubles and to be nice to servers. 
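// A hedged sketch of the new knob (refs and policy context assumed in
// scope): callers can now bound parallelism per copy through Options,
// where 0 keeps the package default of 6:
//
//	manifestBytes, err := copy.Image(ctx, policyContext, destRef, srcRef,
//		&copy.Options{MaxParallelDownloads: 3})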
var copySemaphore *semaphore.Weighted if ic.c.copyInParallel { - copySemaphore = semaphore.NewWeighted(int64(maxParallelDownloads)) + max := ic.c.maxParallelDownloads + if max == 0 { + max = maxParallelDownloads + } + copySemaphore = semaphore.NewWeighted(int64(max)) } else { copySemaphore = semaphore.NewWeighted(int64(1)) } @@ -896,7 +911,7 @@ func (ic *imageCopier) copyLayers(ctx context.Context) error { return nil } -// layerDigestsDiffer return true iff the digests in a and b differ (ignoring sizes and possible other fields) +// layerDigestsDiffer returns true iff the digests in a and b differ (ignoring sizes and possible other fields) func layerDigestsDiffer(a, b []types.BlobInfo) bool { if len(a) != len(b) { return true @@ -951,7 +966,7 @@ func (ic *imageCopier) copyUpdatedConfigAndManifest(ctx context.Context, instanc instanceDigest = &manifestDigest } if err := ic.c.dest.PutManifest(ctx, man, instanceDigest); err != nil { - return nil, "", errors.Wrap(err, "Error writing manifest") + return nil, "", errors.Wrapf(err, "Error writing manifest %q", string(man)) } return man, manifestDigest, nil } @@ -1049,7 +1064,7 @@ type diffIDResult struct { err error } -// copyLayer copies a layer with srcInfo (with known Digest and Annotations and possibly known Size) in src to dest, perhaps compressing it if canCompress, +// copyLayer copies a layer with srcInfo (with known Digest and Annotations and possibly known Size) in src to dest, perhaps (de/re/)compressing it, // and returns a complete blobInfo of the copied layer, and a value for LayerDiffIDs if diffIDIsNeeded func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, toEncrypt bool, pool *mpb.Progress) (types.BlobInfo, digest.Digest, error) { cachedDiffID := ic.c.blobInfoCache.UncompressedDigest(srcInfo.Digest) // May be "" @@ -1058,6 +1073,12 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, to // If we already have the blob, and we don't need to compute the diffID, then we don't need to read it from the source. if !diffIDIsNeeded { + // TODO: at this point we don't know whether or not a blob we end up reusing is compressed using an algorithm + // that is acceptable for use on layers in the manifest that we'll be writing later, so if we end up reusing + // a blob that's compressed with e.g. zstd, but we're only allowed to write a v2s2 manifest, this will cause + // a failure when we eventually try to update the manifest with the digest and MIME type of the reused blob. + // Fixing that will probably require passing more information to TryReusingBlob() than the current version of + // the ImageDestination interface lets us pass in. reused, blobInfo, err := ic.c.dest.TryReusingBlob(ctx, srcInfo, ic.c.blobInfoCache, ic.canSubstituteBlobs) if err != nil { return types.BlobInfo{}, "", errors.Wrapf(err, "Error trying to reuse blob %s at destination", srcInfo.Digest) @@ -1115,7 +1136,7 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, to // copyLayerFromStream is an implementation detail of copyLayer; mostly providing a separate “defer” scope. // it copies a blob with srcInfo (with known Digest and Annotations and possibly known Size) from srcStream to dest, -// perhaps compressing the stream if canCompress, +// perhaps (de/re/)compressing the stream, // and returns a complete blobInfo of the copied blob and perhaps a <-chan diffIDResult if diffIDIsNeeded, to be read by the caller. 
func (ic *imageCopier) copyLayerFromStream(ctx context.Context, srcStream io.Reader, srcInfo types.BlobInfo, diffIDIsNeeded bool, toEncrypt bool, bar *mpb.Bar) (types.BlobInfo, <-chan diffIDResult, error) { @@ -1191,11 +1212,15 @@ func (r errorAnnotationReader) Read(b []byte) (n int, err error) { // copyBlobFromStream copies a blob with srcInfo (with known Digest and Annotations and possibly known Size) from srcStream to dest, // perhaps sending a copy to an io.Writer if getOriginalLayerCopyWriter != nil, -// perhaps compressing it if canCompress, +// perhaps (de/re/)compressing it if canModifyBlob, // and returns a complete blobInfo of the copied blob. func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, srcInfo types.BlobInfo, getOriginalLayerCopyWriter func(decompressor compression.DecompressorFunc) io.Writer, canModifyBlob bool, isConfig bool, toEncrypt bool, bar *mpb.Bar) (types.BlobInfo, error) { + if isConfig { // This is guaranteed by the caller, but set it here to be explicit. + canModifyBlob = false + } + // The copying happens through a pipeline of connected io.Readers. // === Input: srcStream @@ -1253,16 +1278,23 @@ func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, sr originalLayerReader = destStream } - desiredCompressionFormat := c.compressionFormat - // === Deal with layer compression/decompression if necessary var inputInfo types.BlobInfo var compressionOperation types.LayerCompression + uploadCompressionFormat := &c.compressionFormat + srcCompressorName := internalblobinfocache.Uncompressed + if isCompressed { + srcCompressorName = compressionFormat.Name() + } + var uploadCompressorName string if canModifyBlob && isOciEncrypted(srcInfo.MediaType) { // PreserveOriginal due to any compression not being able to be done on an encrypted blob unless decrypted logrus.Debugf("Using original blob without modification for encrypted blob") compressionOperation = types.PreserveOriginal inputInfo = srcInfo + srcCompressorName = internalblobinfocache.UnknownCompression + uploadCompressorName = internalblobinfocache.UnknownCompression + uploadCompressionFormat = nil } else if canModifyBlob && c.dest.DesiredLayerCompression() == types.Compress && !isCompressed { logrus.Debugf("Compressing blob on the fly") compressionOperation = types.Compress @@ -1272,11 +1304,12 @@ func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, sr // If this fails while writing data, it will do pipeWriter.CloseWithError(); if it fails otherwise, // e.g. because we have exited and due to pipeReader.Close() above further writing to the pipe has failed, // we don’t care. - go c.compressGoroutine(pipeWriter, destStream, desiredCompressionFormat) // Closes pipeWriter + go c.compressGoroutine(pipeWriter, destStream, *uploadCompressionFormat) // Closes pipeWriter destStream = pipeReader inputInfo.Digest = "" inputInfo.Size = -1 - } else if canModifyBlob && c.dest.DesiredLayerCompression() == types.Compress && isCompressed && desiredCompressionFormat.Name() != compressionFormat.Name() { + uploadCompressorName = uploadCompressionFormat.Name() + } else if canModifyBlob && c.dest.DesiredLayerCompression() == types.Compress && isCompressed && uploadCompressionFormat.Name() != compressionFormat.Name() { // When the blob is compressed, but the desired format is different, it first needs to be decompressed and finally // re-compressed using the desired format. 
logrus.Debugf("Blob will be converted") @@ -1291,11 +1324,12 @@ func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, sr pipeReader, pipeWriter := io.Pipe() defer pipeReader.Close() - go c.compressGoroutine(pipeWriter, s, desiredCompressionFormat) // Closes pipeWriter + go c.compressGoroutine(pipeWriter, s, *uploadCompressionFormat) // Closes pipeWriter destStream = pipeReader inputInfo.Digest = "" inputInfo.Size = -1 + uploadCompressorName = uploadCompressionFormat.Name() } else if canModifyBlob && c.dest.DesiredLayerCompression() == types.Decompress && isCompressed { logrus.Debugf("Blob will be decompressed") compressionOperation = types.Decompress @@ -1307,11 +1341,15 @@ func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, sr destStream = s inputInfo.Digest = "" inputInfo.Size = -1 + uploadCompressorName = internalblobinfocache.Uncompressed + uploadCompressionFormat = nil } else { // PreserveOriginal might also need to recompress the original blob if the desired compression format is different. logrus.Debugf("Using original blob without modification") compressionOperation = types.PreserveOriginal inputInfo = srcInfo + uploadCompressorName = srcCompressorName + uploadCompressionFormat = nil } // Perform image encryption for valid mediatypes if ociEncryptConfig provided @@ -1371,9 +1409,7 @@ func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, sr uploadedInfo.CompressionOperation = compressionOperation // If we can modify the layer's blob, set the desired algorithm for it to be set in the manifest. - if canModifyBlob && !isConfig { - uploadedInfo.CompressionAlgorithm = &desiredCompressionFormat - } + uploadedInfo.CompressionAlgorithm = uploadCompressionFormat if decrypted { uploadedInfo.CryptoOperation = types.Decrypt } else if encrypted { @@ -1390,7 +1426,7 @@ func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, sr } } - // This is fairly horrible: the writer from getOriginalLayerCopyWriter wants to consumer + // This is fairly horrible: the writer from getOriginalLayerCopyWriter wants to consume // all of the input (to compute DiffIDs), even if dest.PutBlob does not need it. // So, read everything from originalLayerReader, which will cause the rest to be // sent there if we are not already at EOF. @@ -1423,6 +1459,12 @@ func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, sr default: return types.BlobInfo{}, errors.Errorf("Internal error: Unexpected compressionOperation value %#v", compressionOperation) } + if uploadCompressorName != "" && uploadCompressorName != internalblobinfocache.UnknownCompression { + c.blobInfoCache.RecordDigestCompressorName(uploadedInfo.Digest, uploadCompressorName) + } + if srcInfo.Digest != "" && srcCompressorName != "" && srcCompressorName != internalblobinfocache.UnknownCompression { + c.blobInfoCache.RecordDigestCompressorName(srcInfo.Digest, srcCompressorName) + } } return uploadedInfo, nil } diff --git a/vendor/github.com/containers/image/v5/directory/directory_dest.go b/vendor/github.com/containers/image/v5/directory/directory_dest.go index 2b81c8360..5cafd2674 100644 --- a/vendor/github.com/containers/image/v5/directory/directory_dest.go +++ b/vendor/github.com/containers/image/v5/directory/directory_dest.go @@ -194,7 +194,9 @@ func (d *dirImageDestination) PutBlob(ctx context.Context, stream io.Reader, inp // (e.g. 
if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
 // info.Digest must not be empty.
 // If canSubstitute, TryReusingBlob can use an equivalent of the desired blob; in that case the returned info may not match the input.
-// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size.
+// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size, and may
+// include CompressionOperation and CompressionAlgorithm fields to indicate that a change to the compression type should be
+// reflected in the manifest that will be written.
 // If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
 // May use and/or update cache.
 func (d *dirImageDestination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) {
@@ -210,7 +212,6 @@ func (d *dirImageDestination) TryReusingBlob(ctx context.Context, info types.Blo
 return false, types.BlobInfo{}, err
 }
 return true, types.BlobInfo{Digest: info.Digest, Size: finfo.Size()}, nil
-
 }
 // PutManifest writes manifest to the destination.
@@ -251,7 +252,7 @@ func pathExists(path string) (bool, error) {
 if err == nil {
 return true, nil
 }
- if err != nil && os.IsNotExist(err) {
+ if os.IsNotExist(err) {
 return false, nil
 }
 return false, err
diff --git a/vendor/github.com/containers/image/v5/docker/docker_client.go b/vendor/github.com/containers/image/v5/docker/docker_client.go
index 797be45a2..be46508de 100644
--- a/vendor/github.com/containers/image/v5/docker/docker_client.go
+++ b/vendor/github.com/containers/image/v5/docker/docker_client.go
@@ -23,6 +23,7 @@ import (
 "github.com/containers/image/v5/pkg/sysregistriesv2"
 "github.com/containers/image/v5/pkg/tlsclientconfig"
 "github.com/containers/image/v5/types"
+ "github.com/containers/image/v5/version"
 "github.com/containers/storage/pkg/homedir"
 clientLib "github.com/docker/distribution/registry/client"
 "github.com/docker/go-connections/tlsconfig"
@@ -65,6 +66,8 @@ var (
 {path: "/etc/containers/certs.d", absolute: true},
 {path: "/etc/docker/certs.d", absolute: true},
 }
+
+ defaultUserAgent = "containers/" + version.Version + " (github.com/containers/image)"
 )
 // extensionSignature and extensionSignatureList come from github.com/openshift/origin/pkg/dockerregistry/server/signaturedispatcher.go:
@@ -92,8 +95,9 @@ type bearerToken struct {
 // dockerClient is configuration for dealing with a single Docker registry.
 type dockerClient struct {
 // The following members are set by newDockerClient and do not change afterwards.
- sys *types.SystemContext
- registry string
+ sys *types.SystemContext
+ registry string
+ userAgent string
 // tlsClientConfig is set up by newDockerClient and will be used and updated
 // by detectProperties(). Callers can edit tlsClientConfig.InsecureSkipVerify in the meantime.
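The User-Agent plumbing above gives every registry request a default of `containers/<version> (github.com/containers/image)`, and callers can still override it through `types.SystemContext.DockerRegistryUserAgent`, which is now consulted once in `newDockerClient` (next hunk) instead of at each request site. A minimal sketch of how a caller might set that override; the tool name and registry reference are placeholders, not anything defined by this diff:

```go
package main

import (
	"context"

	"github.com/containers/image/v5/docker"
	"github.com/containers/image/v5/types"
)

func main() {
	// DockerRegistryUserAgent replaces defaultUserAgent for every request
	// made by clients built from this SystemContext.
	sys := &types.SystemContext{
		DockerRegistryUserAgent: "my-tool/1.0", // hypothetical tool name
	}

	// Any docker:// client derived from sys sends the custom User-Agent.
	ref, err := docker.ParseReference("//registry.example.com/ns/repo:tag") // placeholder reference
	if err != nil {
		panic(err)
	}
	src, err := ref.NewImageSource(context.Background(), sys)
	if err != nil {
		panic(err)
	}
	defer src.Close()
}
```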
@@ -200,9 +204,7 @@ func dockerCertDir(sys *types.SystemContext, hostPort string) (string, error) {
 logrus.Debugf("error accessing certs directory due to permissions: %v", err)
 continue
 }
- if err != nil {
- return "", err
- }
+ return "", err
 }
 return fullCertDirPath, nil
 }
@@ -277,9 +279,15 @@ func newDockerClient(sys *types.SystemContext, registry, reference string) (*doc
 }
 tlsClientConfig.InsecureSkipVerify = skipVerify
+ userAgent := defaultUserAgent
+ if sys != nil && sys.DockerRegistryUserAgent != "" {
+ userAgent = sys.DockerRegistryUserAgent
+ }
+
 return &dockerClient{
 sys: sys,
 registry: registry,
+ userAgent: userAgent,
 tlsClientConfig: tlsClientConfig,
 }, nil
 }
@@ -529,9 +537,7 @@ func (c *dockerClient) makeRequestToResolvedURLOnce(ctx context.Context, method,
 req.Header.Add(n, hh)
 }
 }
- if c.sys != nil && c.sys.DockerRegistryUserAgent != "" {
- req.Header.Add("User-Agent", c.sys.DockerRegistryUserAgent)
- }
+ req.Header.Add("User-Agent", c.userAgent)
 if auth == v2Auth {
 if err := c.setupRequestAuth(req, extraScope); err != nil {
 return nil, err
@@ -637,9 +643,7 @@ func (c *dockerClient) getBearerTokenOAuth2(ctx context.Context, challenge chall
 params.Add("client_id", "containers/image")
 authReq.Body = ioutil.NopCloser(bytes.NewBufferString(params.Encode()))
- if c.sys != nil && c.sys.DockerRegistryUserAgent != "" {
- authReq.Header.Add("User-Agent", c.sys.DockerRegistryUserAgent)
- }
+ authReq.Header.Add("User-Agent", c.userAgent)
 authReq.Header.Add("Content-Type", "application/x-www-form-urlencoded")
 logrus.Debugf("%s %s", authReq.Method, authReq.URL.String())
 res, err := c.client.Do(authReq)
@@ -692,9 +696,7 @@ func (c *dockerClient) getBearerToken(ctx context.Context, challenge challenge,
 if c.auth.Username != "" && c.auth.Password != "" {
 authReq.SetBasicAuth(c.auth.Username, c.auth.Password)
 }
- if c.sys != nil && c.sys.DockerRegistryUserAgent != "" {
- authReq.Header.Add("User-Agent", c.sys.DockerRegistryUserAgent)
- }
+ authReq.Header.Add("User-Agent", c.userAgent)
 logrus.Debugf("%s %s", authReq.Method, authReq.URL.String())
 res, err := c.client.Do(authReq)
diff --git a/vendor/github.com/containers/image/v5/docker/docker_image_dest.go b/vendor/github.com/containers/image/v5/docker/docker_image_dest.go
index ac63ac121..842dcfba6 100644
--- a/vendor/github.com/containers/image/v5/docker/docker_image_dest.go
+++ b/vendor/github.com/containers/image/v5/docker/docker_image_dest.go
@@ -15,6 +15,7 @@ import (
 "strings"
 "github.com/containers/image/v5/docker/reference"
+ "github.com/containers/image/v5/internal/blobinfocache"
 "github.com/containers/image/v5/internal/iolimits"
 "github.com/containers/image/v5/internal/uploadreader"
 "github.com/containers/image/v5/manifest"
@@ -284,7 +285,9 @@ func (d *dockerImageDestination) mountBlob(ctx context.Context, srcRepo referenc
 // (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
 // info.Digest must not be empty.
 // If canSubstitute, TryReusingBlob can use an equivalent of the desired blob; in that case the returned info may not match the input.
-// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size.
+// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size, and may +// include CompressionOperation and CompressionAlgorithm fields to indicate that a change to the compression type should be +// reflected in the manifest that will be written. // If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure. // May use and/or update cache. func (d *dockerImageDestination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) { @@ -299,17 +302,23 @@ func (d *dockerImageDestination) TryReusingBlob(ctx context.Context, info types. } if exists { cache.RecordKnownLocation(d.ref.Transport(), bicTransportScope(d.ref), info.Digest, newBICLocationReference(d.ref)) - return true, types.BlobInfo{Digest: info.Digest, Size: size}, nil + return true, types.BlobInfo{Digest: info.Digest, MediaType: info.MediaType, Size: size}, nil } // Then try reusing blobs from other locations. - for _, candidate := range cache.CandidateLocations(d.ref.Transport(), bicTransportScope(d.ref), info.Digest, canSubstitute) { + bic := blobinfocache.FromBlobInfoCache(cache) + candidates := bic.CandidateLocations2(d.ref.Transport(), bicTransportScope(d.ref), info.Digest, canSubstitute) + for _, candidate := range candidates { candidateRepo, err := parseBICLocationReference(candidate.Location) if err != nil { logrus.Debugf("Error parsing BlobInfoCache location reference: %s", err) continue } - logrus.Debugf("Trying to reuse cached location %s in %s", candidate.Digest.String(), candidateRepo.Name()) + if candidate.CompressorName != blobinfocache.Uncompressed { + logrus.Debugf("Trying to reuse cached location %s compressed with %s in %s", candidate.Digest.String(), candidate.CompressorName, candidateRepo.Name()) + } else { + logrus.Debugf("Trying to reuse cached location %s with no compression in %s", candidate.Digest.String(), candidateRepo.Name()) + } // Sanity checks: if reference.Domain(candidateRepo) != reference.Domain(d.ref.ref) { @@ -351,8 +360,16 @@ func (d *dockerImageDestination) TryReusingBlob(ctx context.Context, info types. continue } } - cache.RecordKnownLocation(d.ref.Transport(), bicTransportScope(d.ref), candidate.Digest, newBICLocationReference(d.ref)) - return true, types.BlobInfo{Digest: candidate.Digest, Size: size}, nil + + bic.RecordKnownLocation(d.ref.Transport(), bicTransportScope(d.ref), candidate.Digest, newBICLocationReference(d.ref)) + + compressionOperation, compressionAlgorithm, err := blobinfocache.OperationAndAlgorithmForCompressor(candidate.CompressorName) + if err != nil { + logrus.Debugf("... 
Failed: %v", err) + continue + } + + return true, types.BlobInfo{Digest: candidate.Digest, MediaType: info.MediaType, Size: size, CompressionOperation: compressionOperation, CompressionAlgorithm: compressionAlgorithm}, nil } return false, types.BlobInfo{}, nil diff --git a/vendor/github.com/containers/image/v5/docker/docker_image_src.go b/vendor/github.com/containers/image/v5/docker/docker_image_src.go index 70ca7661e..bff950bb0 100644 --- a/vendor/github.com/containers/image/v5/docker/docker_image_src.go +++ b/vendor/github.com/containers/image/v5/docker/docker_image_src.go @@ -64,6 +64,11 @@ func newImageSource(ctx context.Context, sys *types.SystemContext, ref dockerRef } attempts := []attempt{} for _, pullSource := range pullSources { + if sys != nil && sys.DockerLogMirrorChoice { + logrus.Infof("Trying to access %q", pullSource.Reference) + } else { + logrus.Debugf("Trying to access %q", pullSource.Reference) + } logrus.Debugf("Trying to access %q", pullSource.Reference) s, err := newImageSourceAttempt(ctx, sys, ref, pullSource) if err == nil { diff --git a/vendor/github.com/containers/image/v5/docker/internal/tarfile/dest.go b/vendor/github.com/containers/image/v5/docker/internal/tarfile/dest.go index 41d2c5e81..9559dfb56 100644 --- a/vendor/github.com/containers/image/v5/docker/internal/tarfile/dest.go +++ b/vendor/github.com/containers/image/v5/docker/internal/tarfile/dest.go @@ -159,7 +159,9 @@ func (d *Destination) PutBlob(ctx context.Context, stream io.Reader, inputInfo t // (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree). // info.Digest must not be empty. // If canSubstitute, TryReusingBlob can use an equivalent equivalent of the desired blob; in that case the returned info may not match the input. -// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size. +// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size, and may +// include CompressionOperation and CompressionAlgorithm fields to indicate that a change to the compression type should be +// reflected in the manifest that will be written. // If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure. // May use and/or update cache. func (d *Destination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) { diff --git a/vendor/github.com/containers/image/v5/docker/lookaside.go b/vendor/github.com/containers/image/v5/docker/lookaside.go index 06d616d01..0d5d8d82a 100644 --- a/vendor/github.com/containers/image/v5/docker/lookaside.go +++ b/vendor/github.com/containers/image/v5/docker/lookaside.go @@ -21,7 +21,7 @@ import ( // systemRegistriesDirPath is the path to registries.d, used for locating lookaside Docker signature storage. // You can override this at build time with -// -ldflags '-X github.com/containers/image/docker.systemRegistriesDirPath=$your_path' +// -ldflags '-X github.com/containers/image/v5/docker.systemRegistriesDirPath=$your_path' var systemRegistriesDirPath = builtinRegistriesDirPath // builtinRegistriesDirPath is the path to registries.d. 
@@ -96,10 +96,16 @@ func SignatureStorageBaseURL(sys *types.SystemContext, ref types.ImageReference,
 // registriesDirPath returns a path to registries.d
 func registriesDirPath(sys *types.SystemContext) string {
+ return registriesDirPathWithHomeDir(sys, homedir.Get())
+}
+
+// registriesDirPathWithHomeDir is an internal implementation detail of registriesDirPath,
+// it exists only to allow testing it with an artificial home directory.
+func registriesDirPathWithHomeDir(sys *types.SystemContext, homeDir string) string {
 if sys != nil && sys.RegistriesDirPath != "" {
 return sys.RegistriesDirPath
 }
- userRegistriesDirPath := filepath.Join(homedir.Get(), userRegistriesDir)
+ userRegistriesDirPath := filepath.Join(homeDir, userRegistriesDir)
 if _, err := os.Stat(userRegistriesDirPath); err == nil {
 return userRegistriesDirPath
 }
diff --git a/vendor/github.com/containers/image/v5/docker/policyconfiguration/naming.go b/vendor/github.com/containers/image/v5/docker/policyconfiguration/naming.go
index 61d9aab9a..94e9e5f23 100644
--- a/vendor/github.com/containers/image/v5/docker/policyconfiguration/naming.go
+++ b/vendor/github.com/containers/image/v5/docker/policyconfiguration/naming.go
@@ -52,5 +52,26 @@ func DockerReferenceNamespaces(ref reference.Named) []string {
 }
 name = name[:lastSlash]
 }
+
+ // Strip port number if any, before appending to res slice.
+ // Currently, the most compatible behavior is to return
+ // example.com:8443/ns, example.com:8443, *.com.
+ // If a port number is not specified, the expected behavior would be
+ // example.com/ns, example.com, *.com
+ portNumColon := strings.Index(name, ":")
+ if portNumColon != -1 {
+ name = name[:portNumColon]
+ }
+
+ // Append wildcarded domains to res slice
+ for {
+ firstDot := strings.Index(name, ".")
+ if firstDot == -1 {
+ break
+ }
+ name = name[firstDot+1:]
+
+ res = append(res, "*."+name)
+ }
 return res
 }
diff --git a/vendor/github.com/containers/image/v5/docker/tarfile/dest.go b/vendor/github.com/containers/image/v5/docker/tarfile/dest.go
index e16829d96..4f2465cac 100644
--- a/vendor/github.com/containers/image/v5/docker/tarfile/dest.go
+++ b/vendor/github.com/containers/image/v5/docker/tarfile/dest.go
@@ -86,7 +86,9 @@ func (d *Destination) PutBlob(ctx context.Context, stream io.Reader, inputInfo t
 // (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
 // info.Digest must not be empty.
 // If canSubstitute, TryReusingBlob can use an equivalent of the desired blob; in that case the returned info may not match the input.
-// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size.
+// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size, and may
+// include CompressionOperation and CompressionAlgorithm fields to indicate that a change to the compression type should be
+// reflected in the manifest that will be written.
 // If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
 // May use and/or update cache.
func (d *Destination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) { diff --git a/vendor/github.com/containers/image/v5/image/docker_schema2.go b/vendor/github.com/containers/image/v5/image/docker_schema2.go index e4e01d5d9..61ca83364 100644 --- a/vendor/github.com/containers/image/v5/image/docker_schema2.go +++ b/vendor/github.com/containers/image/v5/image/docker_schema2.go @@ -154,6 +154,9 @@ func (m *manifestSchema2) UpdatedImageNeedsLayerDiffIDs(options types.ManifestUp // UpdatedImage returns a types.Image modified according to options. // This does not change the state of the original Image object. +// The returned error will be a manifest.ManifestLayerCompressionIncompatibilityError +// if the CompressionOperation and CompressionAlgorithm specified in one or more +// options.LayerInfos items is anything other than gzip. func (m *manifestSchema2) UpdatedImage(ctx context.Context, options types.ManifestUpdateOptions) (types.Image, error) { copy := manifestSchema2{ // NOTE: This is not a deep copy, it still shares slices etc. src: m.src, diff --git a/vendor/github.com/containers/image/v5/image/oci.go b/vendor/github.com/containers/image/v5/image/oci.go index 5cb04f979..58e9c03ba 100644 --- a/vendor/github.com/containers/image/v5/image/oci.go +++ b/vendor/github.com/containers/image/v5/image/oci.go @@ -134,6 +134,10 @@ func (m *manifestOCI1) UpdatedImageNeedsLayerDiffIDs(options types.ManifestUpdat // UpdatedImage returns a types.Image modified according to options. // This does not change the state of the original Image object. +// The returned error will be a manifest.ManifestLayerCompressionIncompatibilityError +// if the combination of CompressionOperation and CompressionAlgorithm specified +// in one or more options.LayerInfos items indicates that a layer is compressed using +// an algorithm that is not allowed in OCI. func (m *manifestOCI1) UpdatedImage(ctx context.Context, options types.ManifestUpdateOptions) (types.Image, error) { copy := manifestOCI1{ // NOTE: This is not a deep copy, it still shares slices etc. src: m.src, diff --git a/vendor/github.com/containers/image/v5/internal/blobinfocache/blobinfocache.go b/vendor/github.com/containers/image/v5/internal/blobinfocache/blobinfocache.go new file mode 100644 index 000000000..1dceaa669 --- /dev/null +++ b/vendor/github.com/containers/image/v5/internal/blobinfocache/blobinfocache.go @@ -0,0 +1,63 @@ +package blobinfocache + +import ( + "github.com/containers/image/v5/pkg/compression" + "github.com/containers/image/v5/types" + digest "github.com/opencontainers/go-digest" +) + +// FromBlobInfoCache returns a BlobInfoCache2 based on a BlobInfoCache, returning the original +// object if it implements BlobInfoCache2, or a wrapper which discards compression information +// if it only implements BlobInfoCache. 
+func FromBlobInfoCache(bic types.BlobInfoCache) BlobInfoCache2 { + if bic2, ok := bic.(BlobInfoCache2); ok { + return bic2 + } + return &v1OnlyBlobInfoCache{ + BlobInfoCache: bic, + } +} + +type v1OnlyBlobInfoCache struct { + types.BlobInfoCache +} + +func (bic *v1OnlyBlobInfoCache) RecordDigestCompressorName(anyDigest digest.Digest, compressorName string) { +} + +func (bic *v1OnlyBlobInfoCache) CandidateLocations2(transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest, canSubstitute bool) []BICReplacementCandidate2 { + return nil +} + +// CandidateLocationsFromV2 converts a slice of BICReplacementCandidate2 to a slice of +// types.BICReplacementCandidate, dropping compression information. +func CandidateLocationsFromV2(v2candidates []BICReplacementCandidate2) []types.BICReplacementCandidate { + candidates := make([]types.BICReplacementCandidate, 0, len(v2candidates)) + for _, c := range v2candidates { + candidates = append(candidates, types.BICReplacementCandidate{ + Digest: c.Digest, + Location: c.Location, + }) + } + return candidates +} + +// OperationAndAlgorithmForCompressor returns CompressionOperation and CompressionAlgorithm +// values suitable for inclusion in a types.BlobInfo structure, based on the name of the +// compression algorithm, or Uncompressed, or UnknownCompression. This is typically used by +// TryReusingBlob() implementations to set values in the BlobInfo structure that they return +// upon success. +func OperationAndAlgorithmForCompressor(compressorName string) (types.LayerCompression, *compression.Algorithm, error) { + switch compressorName { + case Uncompressed: + return types.Decompress, nil, nil + case UnknownCompression: + return types.PreserveOriginal, nil, nil + default: + algo, err := compression.AlgorithmByName(compressorName) + if err == nil { + return types.Compress, &algo, nil + } + return types.PreserveOriginal, nil, err + } +} diff --git a/vendor/github.com/containers/image/v5/internal/blobinfocache/types.go b/vendor/github.com/containers/image/v5/internal/blobinfocache/types.go new file mode 100644 index 000000000..3c2be57f3 --- /dev/null +++ b/vendor/github.com/containers/image/v5/internal/blobinfocache/types.go @@ -0,0 +1,45 @@ +package blobinfocache + +import ( + "github.com/containers/image/v5/types" + digest "github.com/opencontainers/go-digest" +) + +const ( + // Uncompressed is the value we store in a blob info cache to indicate that we know that + // the blob in the corresponding location is not compressed. + Uncompressed = "uncompressed" + // UnknownCompression is the value we store in a blob info cache to indicate that we don't + // know if the blob in the corresponding location is compressed (and if so, how) or not. + UnknownCompression = "unknown" +) + +// BlobInfoCache2 extends BlobInfoCache by adding the ability to track information about what kind +// of compression was applied to the blobs it keeps information about. +type BlobInfoCache2 interface { + types.BlobInfoCache + // RecordDigestCompressorName records a compressor for the blob with the specified digest, + // or Uncompressed or UnknownCompression. + // WARNING: Only call this with LOCALLY VERIFIED data; don’t record a compressor for a + // digest just because some remote author claims so (e.g. because a manifest says so); + // otherwise the cache could be poisoned and cause us to make incorrect edits to type + // information in a manifest. 
+ RecordDigestCompressorName(anyDigest digest.Digest, compressorName string)
+ // CandidateLocations2 returns a prioritized, limited number of blobs and their locations
+ // that could possibly be reused within the specified (transport scope) (if they still
+ // exist, which is not guaranteed).
+ //
+ // If !canSubstitute, the returned candidates will match the submitted digest exactly; if
+ // canSubstitute, data from previous RecordDigestUncompressedPair calls is used to also look
+ // up variants of the blob which have the same uncompressed digest.
+ //
+ // The CompressorName fields in returned data must never be UnknownCompression.
+ CandidateLocations2(transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest, canSubstitute bool) []BICReplacementCandidate2
+}
+
+// BICReplacementCandidate2 is an item returned by BlobInfoCache2.CandidateLocations2.
+type BICReplacementCandidate2 struct {
+ Digest digest.Digest
+ CompressorName string // either the Name() of a known pkg/compression.Algorithm, or Uncompressed or UnknownCompression
+ Location types.BICLocationReference
+}
diff --git a/vendor/github.com/containers/image/v5/internal/tmpdir/tmpdir.go b/vendor/github.com/containers/image/v5/internal/tmpdir/tmpdir.go
index a3081f4f2..809446e18 100644
--- a/vendor/github.com/containers/image/v5/internal/tmpdir/tmpdir.go
+++ b/vendor/github.com/containers/image/v5/internal/tmpdir/tmpdir.go
@@ -9,7 +9,7 @@ import (
 // unixTempDirForBigFiles is the directory path to store big files on non Windows systems.
 // You can override this at build time with
-// -ldflags '-X github.com/containers/image/internal/tmpdir.unixTempDirForBigFiles=$your_path'
+// -ldflags '-X github.com/containers/image/v5/internal/tmpdir.unixTempDirForBigFiles=$your_path'
 var unixTempDirForBigFiles = builtinUnixTempDirForBigFiles
 // builtinUnixTempDirForBigFiles is the directory path to store big files.
diff --git a/vendor/github.com/containers/image/v5/manifest/common.go b/vendor/github.com/containers/image/v5/manifest/common.go
index fa2b39e0e..3ece948a0 100644
--- a/vendor/github.com/containers/image/v5/manifest/common.go
+++ b/vendor/github.com/containers/image/v5/manifest/common.go
@@ -5,7 +5,6 @@ import (
 "github.com/containers/image/v5/pkg/compression"
 "github.com/containers/image/v5/types"
- "github.com/pkg/errors"
 "github.com/sirupsen/logrus"
 )
@@ -54,6 +53,12 @@ const mtsUnsupportedMIMEType = "" // A value in compressionMIMETypeSet that mean
 // compressionVariantMIMEType returns a variant of mimeType for the specified algorithm (which may be nil
 // to mean "no compression"), based on variantTable.
+// The returned error will be a ManifestLayerCompressionIncompatibilityError if mimeType has variants
+// that differ only in what type of compression is applied, but it can't be combined with this
+// algorithm to produce an updated MIME type that complies with the standard that defines mimeType.
+// If the compression algorithm is unrecognized, or mimeType is not known to have variants that
+// differ from it only in what type of compression has been applied, the returned error will not be
+// a ManifestLayerCompressionIncompatibilityError.
func compressionVariantMIMEType(variantTable []compressionMIMETypeSet, mimeType string, algorithm *compression.Algorithm) (string, error) {
 if mimeType == mtsUnsupportedMIMEType { // Prevent matching against the {algo:mtsUnsupportedMIMEType} entries
 return "", fmt.Errorf("cannot update unknown MIME type")
@@ -70,15 +75,15 @@ func compressionVariantMIMEType(variantTable []compressionMIMETypeSet, mimeType
 return res, nil
 }
 if name != mtsUncompressed {
- return "", fmt.Errorf("%s compression is not supported", name)
+ return "", ManifestLayerCompressionIncompatibilityError{fmt.Sprintf("%s compression is not supported for type %q", name, mt)}
 }
- return "", errors.New("uncompressed variant is not supported")
+ return "", ManifestLayerCompressionIncompatibilityError{fmt.Sprintf("uncompressed variant is not supported for type %q", mt)}
 }
 if name != mtsUncompressed {
- return "", fmt.Errorf("unknown compression algorithm %s", name)
+ return "", ManifestLayerCompressionIncompatibilityError{fmt.Sprintf("compression algorithm %s is not a known variant of type %q", name, mt)}
 }
 // We can't very well say “the idea of no compression is unknown”
- return "", errors.New("uncompressed variant is not supported")
+ return "", ManifestLayerCompressionIncompatibilityError{fmt.Sprintf("uncompressed variant is not supported for type %q", mt)}
 }
 }
 }
@@ -89,7 +94,11 @@ func compressionVariantMIMEType(variantTable []compressionMIMETypeSet, mimeType
 }
 // updatedMIMEType returns the result of applying edits in updated (MediaType, CompressionOperation) to
-// mimeType, based on variantTable. It may use updated.Digest for error messages.
+// mimeType, based on variantTable. It may use updated.Digest for error messages.
+// The returned error will be a ManifestLayerCompressionIncompatibilityError if mimeType has variants
+// that differ only in what type of compression is applied, but applying updated.CompressionOperation
+// and updated.CompressionAlgorithm to it won't produce an updated MIME type that complies with the
+// standard that defines mimeType.
 func updatedMIMEType(variantTable []compressionMIMETypeSet, mimeType string, updated types.BlobInfo) (string, error) {
 // Note that manifests in containers-storage might be reporting the
 // wrong media type since the original manifests are stored while layers
@@ -99,6 +108,12 @@ func updatedMIMEType(variantTable []compressionMIMETypeSet, mimeType string, upd
 // {de}compressed.
 switch updated.CompressionOperation {
 case types.PreserveOriginal:
+ // Force a change to the media type if we're being told to use a particular compressor,
+ // since it might be different from the one associated with the media type. Otherwise,
+ // try to keep the original media type.
+ if updated.CompressionAlgorithm != nil {
+ return compressionVariantMIMEType(variantTable, mimeType, updated.CompressionAlgorithm)
+ }
 // Keep the original media type.
 return mimeType, nil
@@ -116,3 +131,14 @@ func updatedMIMEType(variantTable []compressionMIMETypeSet, mimeType string, upd
 return "", fmt.Errorf("unknown compression operation (%d)", updated.CompressionOperation)
 }
 }
+
+// ManifestLayerCompressionIncompatibilityError indicates that a specified compression algorithm
+// could not be applied to a layer MIME type. A caller that receives this should either retry
+// the call with a different compression algorithm, or attempt to use a different manifest type.
+type ManifestLayerCompressionIncompatibilityError struct {
+ text string
+}
+
+func (m ManifestLayerCompressionIncompatibilityError) Error() string {
+ return m.text
+}
diff --git a/vendor/github.com/containers/image/v5/manifest/docker_schema2.go b/vendor/github.com/containers/image/v5/manifest/docker_schema2.go
index 8d8bb9e01..6cb605263 100644
--- a/vendor/github.com/containers/image/v5/manifest/docker_schema2.go
+++ b/vendor/github.com/containers/image/v5/manifest/docker_schema2.go
@@ -226,6 +226,8 @@ var schema2CompressionMIMETypeSets = []compressionMIMETypeSet{
 }
 // UpdateLayerInfos replaces the original layers with the specified BlobInfos (size+digest+urls), in order (the root layer first, and then successive layered layers)
+// The returned error will be a manifest.ManifestLayerCompressionIncompatibilityError if any of the layerInfos includes a combination of CompressionOperation and
+// CompressionAlgorithm that would result in anything other than gzip compression.
 func (m *Schema2) UpdateLayerInfos(layerInfos []types.BlobInfo) error {
 if len(m.LayersDescriptors) != len(layerInfos) {
 return errors.Errorf("Error preparing updated manifest: layer count changed from %d to %d", len(m.LayersDescriptors), len(layerInfos))
diff --git a/vendor/github.com/containers/image/v5/manifest/oci.go b/vendor/github.com/containers/image/v5/manifest/oci.go
index 292614593..c6299d8e6 100644
--- a/vendor/github.com/containers/image/v5/manifest/oci.go
+++ b/vendor/github.com/containers/image/v5/manifest/oci.go
@@ -108,6 +108,8 @@ var oci1CompressionMIMETypeSets = []compressionMIMETypeSet{
 }
 // UpdateLayerInfos replaces the original layers with the specified BlobInfos (size+digest+urls+mediatype), in order (the root layer first, and then successive layered layers)
+// The returned error will be a manifest.ManifestLayerCompressionIncompatibilityError if any of the layerInfos includes a combination of CompressionOperation and
+// CompressionAlgorithm that isn't supported by OCI.
 func (m *OCI1) UpdateLayerInfos(layerInfos []types.BlobInfo) error {
 if len(m.Layers) != len(layerInfos) {
 return errors.Errorf("Error preparing updated manifest: layer count changed from %d to %d", len(m.Layers), len(layerInfos))
diff --git a/vendor/github.com/containers/image/v5/oci/archive/oci_dest.go b/vendor/github.com/containers/image/v5/oci/archive/oci_dest.go
index 23d471325..c874eb775 100644
--- a/vendor/github.com/containers/image/v5/oci/archive/oci_dest.go
+++ b/vendor/github.com/containers/image/v5/oci/archive/oci_dest.go
@@ -103,7 +103,9 @@ func (d *ociArchiveImageDestination) PutBlob(ctx context.Context, stream io.Read
 // (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
 // info.Digest must not be empty.
 // If canSubstitute, TryReusingBlob can use an equivalent of the desired blob; in that case the returned info may not match the input.
-// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size.
+// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size, and may
+// include CompressionOperation and CompressionAlgorithm fields to indicate that a change to the compression type should be
+// reflected in the manifest that will be written.
 // If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
// May use and/or update cache.
 func (d *ociArchiveImageDestination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) {
diff --git a/vendor/github.com/containers/image/v5/oci/layout/oci_dest.go b/vendor/github.com/containers/image/v5/oci/layout/oci_dest.go
index 0c88e1ef0..1230e8ca3 100644
--- a/vendor/github.com/containers/image/v5/oci/layout/oci_dest.go
+++ b/vendor/github.com/containers/image/v5/oci/layout/oci_dest.go
@@ -186,7 +186,9 @@ func (d *ociImageDestination) PutBlob(ctx context.Context, stream io.Reader, inp
 // (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
 // info.Digest must not be empty.
 // If canSubstitute, TryReusingBlob can use an equivalent of the desired blob; in that case the returned info may not match the input.
-// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size.
+// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size, and may
+// include CompressionOperation and CompressionAlgorithm fields to indicate that a change to the compression type should be
+// reflected in the manifest that will be written.
 // If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
 // May use and/or update cache.
 func (d *ociImageDestination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) {
@@ -204,6 +206,7 @@ func (d *ociImageDestination) TryReusingBlob(ctx context.Context, info types.Blo
 if err != nil {
 return false, types.BlobInfo{}, err
 }
+
 return true, types.BlobInfo{Digest: info.Digest, Size: finfo.Size()}, nil
 }
diff --git a/vendor/github.com/containers/image/v5/openshift/openshift.go b/vendor/github.com/containers/image/v5/openshift/openshift.go
index c4c84dd54..426046e66 100644
--- a/vendor/github.com/containers/image/v5/openshift/openshift.go
+++ b/vendor/github.com/containers/image/v5/openshift/openshift.go
@@ -410,7 +410,9 @@ func (d *openshiftImageDestination) PutBlob(ctx context.Context, stream io.Reade
 // (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
 // info.Digest must not be empty.
 // If canSubstitute, TryReusingBlob can use an equivalent of the desired blob; in that case the returned info may not match the input.
-// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size.
+// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size, and may
+// include CompressionOperation and CompressionAlgorithm fields to indicate that a change to the compression type should be
+// reflected in the manifest that will be written.
 // If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
 // May use and/or update cache.
func (d *openshiftImageDestination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) {
diff --git a/vendor/github.com/containers/image/v5/ostree/ostree_dest.go b/vendor/github.com/containers/image/v5/ostree/ostree_dest.go
index b518122e2..c91a49c57 100644
--- a/vendor/github.com/containers/image/v5/ostree/ostree_dest.go
+++ b/vendor/github.com/containers/image/v5/ostree/ostree_dest.go
@@ -339,7 +339,9 @@ func (d *ostreeImageDestination) importConfig(repo *otbuiltin.Repo, blob *blobTo
 // (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
 // info.Digest must not be empty.
 // If canSubstitute, TryReusingBlob can use an equivalent of the desired blob; in that case the returned info may not match the input.
-// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size.
+// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size, and may
+// include CompressionOperation and CompressionAlgorithm fields to indicate that a change to the compression type should be
+// reflected in the manifest that will be written.
 // If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
 // May use and/or update cache.
 func (d *ostreeImageDestination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) {
diff --git a/vendor/github.com/containers/image/v5/pkg/blobinfocache/boltdb/boltdb.go b/vendor/github.com/containers/image/v5/pkg/blobinfocache/boltdb/boltdb.go
index 200dab593..2c211b8b8 100644
--- a/vendor/github.com/containers/image/v5/pkg/blobinfocache/boltdb/boltdb.go
+++ b/vendor/github.com/containers/image/v5/pkg/blobinfocache/boltdb/boltdb.go
@@ -7,6 +7,7 @@ import (
 "sync"
 "time"
+ "github.com/containers/image/v5/internal/blobinfocache"
 "github.com/containers/image/v5/pkg/blobinfocache/internal/prioritize"
 "github.com/containers/image/v5/types"
 "github.com/opencontainers/go-digest"
@@ -22,6 +23,9 @@ var (
 // uncompressedDigestBucket stores a mapping from any digest to an uncompressed digest.
 uncompressedDigestBucket = []byte("uncompressedDigest")
+ // digestCompressorBucket stores a mapping from any digest to a compressor, or blobinfocache.Uncompressed
+ // It may not exist in caches created by older versions, even if uncompressedDigestBucket is present.
+ digestCompressorBucket = []byte("digestCompressor")
 // digestByUncompressedBucket stores a bucket per uncompressed digest, with the bucket containing a set of digests for that uncompressed digest
 // (as a set of key=digest, value="" pairs)
 digestByUncompressedBucket = []byte("digestByUncompressed")
@@ -95,6 +99,9 @@ type cache struct {
 //
 // Most users should call blobinfocache.DefaultCache instead.
 func New(path string) types.BlobInfoCache {
+ return new2(path)
+}
+func new2(path string) *cache {
 return &cache{path: path}
 }
@@ -220,6 +227,30 @@ func (bdc *cache) RecordDigestUncompressedPair(anyDigest digest.Digest, uncompre
 }) // FIXME? Log error (but throttle the log volume on repeated accesses)?
 }
+// RecordDigestCompressorName records that the blob with digest anyDigest was compressed with the specified
+// compressor, or is blobinfocache.Uncompressed.
+// WARNING: Only call this for LOCALLY VERIFIED data; don’t record a digest pair just because some remote author claims so (e.g.
+// because a manifest/config pair exists); otherwise the cache could be poisoned and allow substituting unexpected blobs.
+// (Eventually, the DiffIDs in image config could detect the substitution, but that may be too late, and not all image formats contain that data.)
+func (bdc *cache) RecordDigestCompressorName(anyDigest digest.Digest, compressorName string) {
+ _ = bdc.update(func(tx *bolt.Tx) error {
+ b, err := tx.CreateBucketIfNotExists(digestCompressorBucket)
+ if err != nil {
+ return err
+ }
+ key := []byte(anyDigest.String())
+ if previousBytes := b.Get(key); previousBytes != nil {
+ if string(previousBytes) != compressorName {
+ logrus.Warnf("Compressor for blob with digest %s previously recorded as %s, now %s", anyDigest, string(previousBytes), compressorName)
+ }
+ }
+ if compressorName == blobinfocache.UnknownCompression {
+ return b.Delete(key)
+ }
+ return b.Put(key, []byte(compressorName))
+ }) // FIXME? Log error (but throttle the log volume on repeated accesses)?
+}
+
 // RecordKnownLocation records that a blob with the specified digest exists within the specified (transport, scope) scope,
 // and can be reused given the opaque location data.
 func (bdc *cache) RecordKnownLocation(transport types.ImageTransport, scope types.BICTransportScope, blobDigest digest.Digest, location types.BICLocationReference) {
@@ -251,21 +282,34 @@ func (bdc *cache) RecordKnownLocation(transport types.ImageTransport, scope type
 }) // FIXME? Log error (but throttle the log volume on repeated accesses)?
 }
-// appendReplacementCandiates creates prioritize.CandidateWithTime values for digest in scopeBucket, and returns the result of appending them to candidates.
-func (bdc *cache) appendReplacementCandidates(candidates []prioritize.CandidateWithTime, scopeBucket *bolt.Bucket, digest digest.Digest) []prioritize.CandidateWithTime {
- b := scopeBucket.Bucket([]byte(digest.String()))
+// appendReplacementCandidates creates prioritize.CandidateWithTime values for digest in scopeBucket with corresponding compression info from compressionBucket (if compressionBucket is not nil), and returns the result of appending them to candidates.
+func (bdc *cache) appendReplacementCandidates(candidates []prioritize.CandidateWithTime, scopeBucket, compressionBucket *bolt.Bucket, digest digest.Digest, requireCompressionInfo bool) []prioritize.CandidateWithTime {
+ digestKey := []byte(digest.String())
+ b := scopeBucket.Bucket(digestKey)
 if b == nil {
 return candidates
 }
+ compressorName := blobinfocache.UnknownCompression
+ if compressionBucket != nil {
+ // the bucket won't exist if the cache was created by a v1 implementation and
+ // hasn't yet been updated by a v2 implementation
+ if compressorNameValue := compressionBucket.Get(digestKey); len(compressorNameValue) > 0 {
+ compressorName = string(compressorNameValue)
+ }
+ }
+ if compressorName == blobinfocache.UnknownCompression && requireCompressionInfo {
+ return candidates
+ }
 _ = b.ForEach(func(k, v []byte) error {
 t := time.Time{}
 if err := t.UnmarshalBinary(v); err != nil {
 return err
 }
 candidates = append(candidates, prioritize.CandidateWithTime{
- Candidate: types.BICReplacementCandidate{
- Digest: digest,
- Location: types.BICLocationReference{Opaque: string(k)},
+ Candidate: blobinfocache.BICReplacementCandidate2{
+ Digest: digest,
+ CompressorName: compressorName,
+ Location: types.BICLocationReference{Opaque: string(k)},
 },
 LastSeen: t,
 })
@@ -274,13 +318,17 @@ func (bdc *cache) appendReplacementCandidates(candidates []prioritize.CandidateW
 return candidates
 }
-// CandidateLocations returns a prioritized, limited, number of blobs and their locations that could possibly be reused
+// CandidateLocations2 returns a prioritized, limited number of blobs and their locations that could possibly be reused
 // within the specified (transport scope) (if they still exist, which is not guaranteed).
 //
 // If !canSubstitute, the returned candidates will match the submitted digest exactly; if canSubstitute,
 // data from previous RecordDigestUncompressedPair calls is used to also look up variants of the blob which have the same
 // uncompressed digest.
-func (bdc *cache) CandidateLocations(transport types.ImageTransport, scope types.BICTransportScope, primaryDigest digest.Digest, canSubstitute bool) []types.BICReplacementCandidate {
+func (bdc *cache) CandidateLocations2(transport types.ImageTransport, scope types.BICTransportScope, primaryDigest digest.Digest, canSubstitute bool) []blobinfocache.BICReplacementCandidate2 {
+ return bdc.candidateLocations(transport, scope, primaryDigest, canSubstitute, true)
+}
+
+func (bdc *cache) candidateLocations(transport types.ImageTransport, scope types.BICTransportScope, primaryDigest digest.Digest, canSubstitute, requireCompressionInfo bool) []blobinfocache.BICReplacementCandidate2 {
 res := []prioritize.CandidateWithTime{}
 var uncompressedDigestValue digest.Digest // = ""
 if err := bdc.view(func(tx *bolt.Tx) error {
@@ -296,8 +344,11 @@ func (bdc *cache) CandidateLocations(transport types.ImageTransport, scope types
 if scopeBucket == nil {
 return nil
 }
+ // compressionBucket won't have been created if previous writers never recorded info about compression,
+ // and we don't want to fail just because of that
+ compressionBucket := tx.Bucket(digestCompressorBucket)
- res = bdc.appendReplacementCandidates(res, scopeBucket, primaryDigest)
+ res = bdc.appendReplacementCandidates(res, scopeBucket, compressionBucket, primaryDigest, requireCompressionInfo)
 if canSubstitute {
 if uncompressedDigestValue = bdc.uncompressedDigest(tx, primaryDigest); uncompressedDigestValue != "" {
 b := tx.Bucket(digestByUncompressedBucket)
@@ -310,7 +361,7 @@ func (bdc *cache) CandidateLocations(transport types.ImageTransport, scope types
 return err
 }
 if d != primaryDigest && d != uncompressedDigestValue {
- res = bdc.appendReplacementCandidates(res, scopeBucket, d)
+ res = bdc.appendReplacementCandidates(res, scopeBucket, compressionBucket, d, requireCompressionInfo)
 }
 return nil
 }); err != nil {
@@ -319,14 +370,24 @@ func (bdc *cache) CandidateLocations(transport types.ImageTransport, scope types
 }
 }
 if uncompressedDigestValue != primaryDigest {
- res = bdc.appendReplacementCandidates(res, scopeBucket, uncompressedDigestValue)
+ res = bdc.appendReplacementCandidates(res, scopeBucket, compressionBucket, uncompressedDigestValue, requireCompressionInfo)
 }
 }
 }
 return nil
 }); err != nil { // Including os.IsNotExist(err)
- return []types.BICReplacementCandidate{} // FIXME? Log err (but throttle the log volume on repeated accesses)?
+ return []blobinfocache.BICReplacementCandidate2{} // FIXME? Log err (but throttle the log volume on repeated accesses)?
 }
 return prioritize.DestructivelyPrioritizeReplacementCandidates(res, primaryDigest, uncompressedDigestValue)
 }
+
+// CandidateLocations returns a prioritized, limited number of blobs and their locations that could possibly be reused
+// within the specified (transport scope) (if they still exist, which is not guaranteed).
+//
+// If !canSubstitute, the returned candidates will match the submitted digest exactly; if canSubstitute,
+// data from previous RecordDigestUncompressedPair calls is used to also look up variants of the blob which have the same
+// uncompressed digest.
+func (bdc *cache) CandidateLocations(transport types.ImageTransport, scope types.BICTransportScope, primaryDigest digest.Digest, canSubstitute bool) []types.BICReplacementCandidate { + return blobinfocache.CandidateLocationsFromV2(bdc.candidateLocations(transport, scope, primaryDigest, canSubstitute, false)) +} diff --git a/vendor/github.com/containers/image/v5/pkg/blobinfocache/internal/prioritize/prioritize.go b/vendor/github.com/containers/image/v5/pkg/blobinfocache/internal/prioritize/prioritize.go index 5deca4a82..6f5506d94 100644 --- a/vendor/github.com/containers/image/v5/pkg/blobinfocache/internal/prioritize/prioritize.go +++ b/vendor/github.com/containers/image/v5/pkg/blobinfocache/internal/prioritize/prioritize.go @@ -6,7 +6,7 @@ import ( "sort" "time" - "github.com/containers/image/v5/types" + "github.com/containers/image/v5/internal/blobinfocache" "github.com/opencontainers/go-digest" ) @@ -17,8 +17,8 @@ const replacementAttempts = 5 // CandidateWithTime is the input to types.BICReplacementCandidate prioritization. type CandidateWithTime struct { - Candidate types.BICReplacementCandidate // The replacement candidate - LastSeen time.Time // Time the candidate was last known to exist (either read or written) + Candidate blobinfocache.BICReplacementCandidate2 // The replacement candidate + LastSeen time.Time // Time the candidate was last known to exist (either read or written) } // candidateSortState is a local state implementing sort.Interface on candidates to prioritize, @@ -79,7 +79,7 @@ func (css *candidateSortState) Swap(i, j int) { // destructivelyPrioritizeReplacementCandidatesWithMax is destructivelyPrioritizeReplacementCandidates with a parameter for the // number of entries to limit, only to make testing simpler. -func destructivelyPrioritizeReplacementCandidatesWithMax(cs []CandidateWithTime, primaryDigest, uncompressedDigest digest.Digest, maxCandidates int) []types.BICReplacementCandidate { +func destructivelyPrioritizeReplacementCandidatesWithMax(cs []CandidateWithTime, primaryDigest, uncompressedDigest digest.Digest, maxCandidates int) []blobinfocache.BICReplacementCandidate2 { // We don't need to use sort.Stable() because nanosecond timestamps are (presumably?) unique, so no two elements should // compare equal. sort.Sort(&candidateSortState{ @@ -92,7 +92,7 @@ func destructivelyPrioritizeReplacementCandidatesWithMax(cs []CandidateWithTime, if resLength > maxCandidates { resLength = maxCandidates } - res := make([]types.BICReplacementCandidate, resLength) + res := make([]blobinfocache.BICReplacementCandidate2, resLength) for i := range res { res[i] = cs[i].Candidate } @@ -105,6 +105,6 @@ func destructivelyPrioritizeReplacementCandidatesWithMax(cs []CandidateWithTime, // // WARNING: The array of candidates is destructively modified. (The implementation of this function could of course // make a copy, but all CandidateLocations implementations build the slice of candidates only for the single purpose of calling this function anyway.) 
-func DestructivelyPrioritizeReplacementCandidates(cs []CandidateWithTime, primaryDigest, uncompressedDigest digest.Digest) []types.BICReplacementCandidate { +func DestructivelyPrioritizeReplacementCandidates(cs []CandidateWithTime, primaryDigest, uncompressedDigest digest.Digest) []blobinfocache.BICReplacementCandidate2 { return destructivelyPrioritizeReplacementCandidatesWithMax(cs, primaryDigest, uncompressedDigest, replacementAttempts) } diff --git a/vendor/github.com/containers/image/v5/pkg/blobinfocache/memory/memory.go b/vendor/github.com/containers/image/v5/pkg/blobinfocache/memory/memory.go index 8f28c6623..3d598057e 100644 --- a/vendor/github.com/containers/image/v5/pkg/blobinfocache/memory/memory.go +++ b/vendor/github.com/containers/image/v5/pkg/blobinfocache/memory/memory.go @@ -5,6 +5,7 @@ import ( "sync" "time" + "github.com/containers/image/v5/internal/blobinfocache" "github.com/containers/image/v5/pkg/blobinfocache/internal/prioritize" "github.com/containers/image/v5/types" digest "github.com/opencontainers/go-digest" @@ -25,6 +26,7 @@ type cache struct { uncompressedDigests map[digest.Digest]digest.Digest digestsByUncompressed map[digest.Digest]map[digest.Digest]struct{} // stores a set of digests for each uncompressed digest knownLocations map[locationKey]map[types.BICLocationReference]time.Time // stores last known existence time for each location reference + compressors map[digest.Digest]string // stores a compressor name, or blobinfocache.Unknown, for each digest } // New returns a BlobInfoCache implementation which is in-memory only. @@ -36,10 +38,15 @@ type cache struct { // Manual users of types.{ImageSource,ImageDestination} might also use // this instead of a persistent cache. func New() types.BlobInfoCache { + return new2() +} + +func new2() *cache { return &cache{ uncompressedDigests: map[digest.Digest]digest.Digest{}, digestsByUncompressed: map[digest.Digest]map[digest.Digest]struct{}{}, knownLocations: map[locationKey]map[types.BICLocationReference]time.Time{}, + compressors: map[digest.Digest]string{}, } } @@ -101,14 +108,34 @@ func (mem *cache) RecordKnownLocation(transport types.ImageTransport, scope type locationScope[location] = time.Now() // Possibly overwriting an older entry. } +// RecordDigestCompressorName records that the blob with the specified digest is either compressed with the specified +// algorithm, or uncompressed, or that we no longer know. +func (mem *cache) RecordDigestCompressorName(blobDigest digest.Digest, compressorName string) { + mem.mutex.Lock() + defer mem.mutex.Unlock() + if compressorName == blobinfocache.UnknownCompression { + delete(mem.compressors, blobDigest) + return + } + mem.compressors[blobDigest] = compressorName +} + // appendReplacementCandidates creates prioritize.CandidateWithTime values for (transport, scope, digest), and returns the result of appending them to candidates. 
-func (mem *cache) appendReplacementCandidates(candidates []prioritize.CandidateWithTime, transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest) []prioritize.CandidateWithTime { +func (mem *cache) appendReplacementCandidates(candidates []prioritize.CandidateWithTime, transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest, requireCompressionInfo bool) []prioritize.CandidateWithTime { locations := mem.knownLocations[locationKey{transport: transport.Name(), scope: scope, blobDigest: digest}] // nil if not present for l, t := range locations { + compressorName, compressorKnown := mem.compressors[digest] + if !compressorKnown { + if requireCompressionInfo { + continue + } + compressorName = blobinfocache.UnknownCompression + } candidates = append(candidates, prioritize.CandidateWithTime{ - Candidate: types.BICReplacementCandidate{ - Digest: digest, - Location: l, + Candidate: blobinfocache.BICReplacementCandidate2{ + Digest: digest, + CompressorName: compressorName, + Location: l, }, LastSeen: t, }) @@ -123,21 +150,35 @@ func (mem *cache) appendReplacementCandidates(candidates []prioritize.CandidateW // data from previous RecordDigestUncompressedPair calls is used to also look up variants of the blob which have the same // uncompressed digest. func (mem *cache) CandidateLocations(transport types.ImageTransport, scope types.BICTransportScope, primaryDigest digest.Digest, canSubstitute bool) []types.BICReplacementCandidate { + return blobinfocache.CandidateLocationsFromV2(mem.candidateLocations(transport, scope, primaryDigest, canSubstitute, false)) +} + +// CandidateLocations2 returns a prioritized, limited, number of blobs and their locations that could possibly be reused +// within the specified (transport scope) (if they still exist, which is not guaranteed). +// +// If !canSubstitute, the returned candidates will match the submitted digest exactly; if canSubstitute, +// data from previous RecordDigestUncompressedPair calls is used to also look up variants of the blob which have the same +// uncompressed digest. 
+func (mem *cache) CandidateLocations2(transport types.ImageTransport, scope types.BICTransportScope, primaryDigest digest.Digest, canSubstitute bool) []blobinfocache.BICReplacementCandidate2 { + return mem.candidateLocations(transport, scope, primaryDigest, canSubstitute, true) +} + +func (mem *cache) candidateLocations(transport types.ImageTransport, scope types.BICTransportScope, primaryDigest digest.Digest, canSubstitute, requireCompressionInfo bool) []blobinfocache.BICReplacementCandidate2 { mem.mutex.Lock() defer mem.mutex.Unlock() res := []prioritize.CandidateWithTime{} - res = mem.appendReplacementCandidates(res, transport, scope, primaryDigest) + res = mem.appendReplacementCandidates(res, transport, scope, primaryDigest, requireCompressionInfo) var uncompressedDigest digest.Digest // = "" if canSubstitute { if uncompressedDigest = mem.uncompressedDigestLocked(primaryDigest); uncompressedDigest != "" { otherDigests := mem.digestsByUncompressed[uncompressedDigest] // nil if not present in the map for d := range otherDigests { if d != primaryDigest && d != uncompressedDigest { - res = mem.appendReplacementCandidates(res, transport, scope, d) + res = mem.appendReplacementCandidates(res, transport, scope, d, requireCompressionInfo) } } if uncompressedDigest != primaryDigest { - res = mem.appendReplacementCandidates(res, transport, scope, uncompressedDigest) + res = mem.appendReplacementCandidates(res, transport, scope, uncompressedDigest, requireCompressionInfo) } } } diff --git a/vendor/github.com/containers/image/v5/pkg/blobinfocache/none/none.go b/vendor/github.com/containers/image/v5/pkg/blobinfocache/none/none.go index fa1879afd..2a54ff312 100644 --- a/vendor/github.com/containers/image/v5/pkg/blobinfocache/none/none.go +++ b/vendor/github.com/containers/image/v5/pkg/blobinfocache/none/none.go @@ -2,6 +2,7 @@ package none import ( + "github.com/containers/image/v5/internal/blobinfocache" "github.com/containers/image/v5/types" "github.com/opencontainers/go-digest" ) @@ -16,7 +17,7 @@ type noCache struct { // Manifest.Inspect, because configs only have one representation. // Any use of BlobInfoCache with blobs should usually use at least a // short-lived cache, ideally blobinfocache.DefaultCache. -var NoCache types.BlobInfoCache = noCache{} +var NoCache blobinfocache.BlobInfoCache2 = blobinfocache.FromBlobInfoCache(&noCache{}) // UncompressedDigest returns an uncompressed digest corresponding to anyDigest. // May return anyDigest if it is known to be uncompressed. diff --git a/vendor/github.com/containers/image/v5/pkg/docker/config/config.go b/vendor/github.com/containers/image/v5/pkg/docker/config/config.go index cf82ee861..983df41d8 100644 --- a/vendor/github.com/containers/image/v5/pkg/docker/config/config.go +++ b/vendor/github.com/containers/image/v5/pkg/docker/config/config.go @@ -86,7 +86,7 @@ func GetAllCredentials(sys *types.SystemContext) (map[string]types.DockerAuthCon // Note: we need to read the auth files in the inverse order to prevent // a priority inversion when writing to the map. authConfigs := make(map[string]types.DockerAuthConfig) - paths := getAuthFilePaths(sys) + paths := getAuthFilePaths(sys, homedir.Get()) for i := len(paths) - 1; i >= 0; i-- { path := paths[i] // readJSONFile returns an empty map in case the path doesn't exist. @@ -126,7 +126,9 @@ func GetAllCredentials(sys *types.SystemContext) (map[string]types.DockerAuthCon // getAuthFilePaths returns a slice of authPaths based on the system context // in the order they should be searched. 
Note that some paths may not exist. -func getAuthFilePaths(sys *types.SystemContext) []authPath { +// The homeDir parameter should always be homedir.Get(), and is only intended to be overridden +// by tests. +func getAuthFilePaths(sys *types.SystemContext, homeDir string) []authPath { paths := []authPath{} pathToAuth, lf, err := getPathToAuth(sys) if err == nil { @@ -139,7 +141,7 @@ func getAuthFilePaths(sys *types.SystemContext) []authPath { } xdgCfgHome := os.Getenv("XDG_CONFIG_HOME") if xdgCfgHome == "" { - xdgCfgHome = filepath.Join(homedir.Get(), ".config") + xdgCfgHome = filepath.Join(homeDir, ".config") } paths = append(paths, authPath{path: filepath.Join(xdgCfgHome, xdgConfigHomePath), legacyFormat: false}) if dockerConfig := os.Getenv("DOCKER_CONFIG"); dockerConfig != "" { @@ -148,11 +150,11 @@ func getAuthFilePaths(sys *types.SystemContext) []authPath { ) } else { paths = append(paths, - authPath{path: filepath.Join(homedir.Get(), dockerHomePath), legacyFormat: false}, + authPath{path: filepath.Join(homeDir, dockerHomePath), legacyFormat: false}, ) } paths = append(paths, - authPath{path: filepath.Join(homedir.Get(), dockerLegacyHomePath), legacyFormat: true}, + authPath{path: filepath.Join(homeDir, dockerLegacyHomePath), legacyFormat: true}, ) return paths } @@ -161,6 +163,12 @@ func getAuthFilePaths(sys *types.SystemContext) []authPath { // file or .docker/config.json, including support for OAuth2 and IdentityToken. // If an entry is not found, an empty struct is returned. func GetCredentials(sys *types.SystemContext, registry string) (types.DockerAuthConfig, error) { + return getCredentialsWithHomeDir(sys, registry, homedir.Get()) +} + +// getCredentialsWithHomeDir is an internal implementation detail of GetCredentials, +// it exists only to allow testing it with an artificial home directory. +func getCredentialsWithHomeDir(sys *types.SystemContext, registry, homeDir string) (types.DockerAuthConfig, error) { if sys != nil && sys.DockerAuthConfig != nil { logrus.Debug("Returning credentials from DockerAuthConfig") return *sys.DockerAuthConfig, nil @@ -177,7 +185,7 @@ func GetCredentials(sys *types.SystemContext, registry string) (types.DockerAuth } } - for _, path := range getAuthFilePaths(sys) { + for _, path := range getAuthFilePaths(sys, homeDir) { authConfig, err := findAuthentication(registry, path.path, path.legacyFormat) if err != nil { logrus.Debugf("Credentials not found") @@ -203,7 +211,13 @@ func GetCredentials(sys *types.SystemContext, registry string) (types.DockerAuth // GetCredentials API. The new API should be used and this API is kept to // maintain backward compatibility. func GetAuthentication(sys *types.SystemContext, registry string) (string, string, error) { - auth, err := GetCredentials(sys, registry) + return getAuthenticationWithHomeDir(sys, registry, homedir.Get()) +} + +// getAuthenticationWithHomeDir is an internal implementation detail of GetAuthentication, +// it exists only to allow testing it with an artificial home directory. 
+func getAuthenticationWithHomeDir(sys *types.SystemContext, registry, homeDir string) (string, string, error) { + auth, err := getCredentialsWithHomeDir(sys, registry, homeDir) if err != nil { return "", "", err } @@ -262,6 +276,12 @@ func RemoveAllAuthentication(sys *types.SystemContext) error { // getPathToAuth gets the path of the auth.json file used for reading and writing credentials // returns the path, and a bool specifying whether the file is in legacy format func getPathToAuth(sys *types.SystemContext) (string, bool, error) { + return getPathToAuthWithOS(sys, runtime.GOOS) +} + +// getPathToAuthWithOS is an internal implementation detail of getPathToAuth, +// it exists only to allow testing it with an artificial runtime.GOOS. +func getPathToAuthWithOS(sys *types.SystemContext, goOS string) (string, bool, error) { if sys != nil { if sys.AuthFilePath != "" { return sys.AuthFilePath, false, nil @@ -273,7 +293,7 @@ func getPathToAuth(sys *types.SystemContext) (string, bool, error) { return filepath.Join(sys.RootForImplicitAbsolutePaths, fmt.Sprintf(defaultPerUIDPathFormat, os.Getuid())), false, nil } } - if runtime.GOOS == "windows" || runtime.GOOS == "darwin" { + if goOS == "windows" || goOS == "darwin" { return filepath.Join(homedir.Get(), nonLinuxAuthFilePath), false, nil } diff --git a/vendor/github.com/containers/image/v5/pkg/shortnames/shortnames.go b/vendor/github.com/containers/image/v5/pkg/shortnames/shortnames.go index 198ac1cc6..f1e5c453e 100644 --- a/vendor/github.com/containers/image/v5/pkg/shortnames/shortnames.go +++ b/vendor/github.com/containers/image/v5/pkg/shortnames/shortnames.go @@ -149,9 +149,9 @@ const ( func (r *Resolved) Description() string { switch r.rationale { case rationaleAlias: - return fmt.Sprintf("Resolved short name %q to a recorded short-name alias (origin: %s)", r.userInput, r.originDescription) + return fmt.Sprintf("Resolved %q as an alias (%s)", r.userInput, r.originDescription) case rationaleUSR: - return fmt.Sprintf("Completed short name %q with unqualified-search registries (origin: %s)", r.userInput, r.originDescription) + return fmt.Sprintf("Resolving %q using unqualified-search registries (%s)", r.userInput, r.originDescription) case rationaleUserSelection, rationaleNone: fallthrough default: @@ -240,14 +240,14 @@ func Resolve(ctx *types.SystemContext, name string) (*Resolved, error) { // Create a copy of the system context to make it usable beyond this // function call. - var sys *types.SystemContext if ctx != nil { - sys = &(*ctx) + copy := *ctx + ctx = &copy } resolved.systemContext = ctx // Detect which mode we're running in. - mode, err := sysregistriesv2.GetShortNameMode(sys) + mode, err := sysregistriesv2.GetShortNameMode(ctx) if err != nil { return nil, err } @@ -276,7 +276,7 @@ func Resolve(ctx *types.SystemContext, name string) (*Resolved, error) { resolved.userInput = shortNameRepo // If there's already an alias, use it. - namedAlias, aliasOriginDescription, err := sysregistriesv2.ResolveShortNameAlias(sys, shortNameRepo.String()) + namedAlias, aliasOriginDescription, err := sysregistriesv2.ResolveShortNameAlias(ctx, shortNameRepo.String()) if err != nil { return nil, err } @@ -307,7 +307,7 @@ func Resolve(ctx *types.SystemContext, name string) (*Resolved, error) { resolved.rationale = rationaleUSR // Query the registry for unqualified-search registries. 
- unqualifiedSearchRegistries, usrConfig, err := sysregistriesv2.UnqualifiedSearchRegistriesWithOrigin(sys) + unqualifiedSearchRegistries, usrConfig, err := sysregistriesv2.UnqualifiedSearchRegistriesWithOrigin(ctx) if err != nil { return nil, err } diff --git a/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/system_registries_v2.go b/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/system_registries_v2.go index 89ad7c533..3312237ef 100644 --- a/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/system_registries_v2.go +++ b/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/system_registries_v2.go @@ -20,7 +20,7 @@ import ( // systemRegistriesConfPath is the path to the system-wide registry // configuration file and is used to add/subtract potential registries for // obtaining images. You can override this at build time with -// -ldflags '-X github.com/containers/image/sysregistries.systemRegistriesConfPath=$your_path' +// -ldflags '-X github.com/containers/image/v5/sysregistries.systemRegistriesConfPath=$your_path' var systemRegistriesConfPath = builtinRegistriesConfPath // builtinRegistriesConfPath is the path to the registry configuration file. @@ -30,7 +30,7 @@ const builtinRegistriesConfPath = "/etc/containers/registries.conf" // systemRegistriesConfDirPath is the path to the system-wide registry // configuration directory and is used to add/subtract potential registries for // obtaining images. You can override this at build time with -// -ldflags '-X github.com/containers/image/sysregistries.systemRegistriesConfDirecotyPath=$your_path' +// -ldflags '-X github.com/containers/image/v5/sysregistries.systemRegistriesConfDirecotyPath=$your_path' var systemRegistriesConfDirPath = builtinRegistriesConfDirPath // builtinRegistriesConfDirPath is the path to the registry configuration directory. @@ -405,9 +405,15 @@ type configWrapper struct { // newConfigWrapper returns a configWrapper for the specified SystemContext. func newConfigWrapper(ctx *types.SystemContext) configWrapper { + return newConfigWrapperWithHomeDir(ctx, homedir.Get()) +} + +// newConfigWrapperWithHomeDir is an internal implementation detail of newConfigWrapper, +// it exists only to allow testing it with an artificial home directory. +func newConfigWrapperWithHomeDir(ctx *types.SystemContext, homeDir string) configWrapper { var wrapper configWrapper - userRegistriesFilePath := filepath.Join(homedir.Get(), userRegistriesFile) - userRegistriesDirPath := filepath.Join(homedir.Get(), userRegistriesDir) + userRegistriesFilePath := filepath.Join(homeDir, userRegistriesFile) + userRegistriesDirPath := filepath.Join(homeDir, userRegistriesDir) // decide configPath using per-user path or system file if ctx != nil && ctx.SystemRegistriesConfPath != "" { diff --git a/vendor/github.com/containers/image/v5/signature/policy_config.go b/vendor/github.com/containers/image/v5/signature/policy_config.go index d8cc4a09b..82fbb68cb 100644 --- a/vendor/github.com/containers/image/v5/signature/policy_config.go +++ b/vendor/github.com/containers/image/v5/signature/policy_config.go @@ -30,7 +30,7 @@ import ( // systemDefaultPolicyPath is the policy path used for DefaultPolicy(). 
// You can override this at build time with -// -ldflags '-X github.com/containers/image/signature.systemDefaultPolicyPath=$your_path' +// -ldflags '-X github.com/containers/image/v5/signature.systemDefaultPolicyPath=$your_path' var systemDefaultPolicyPath = builtinDefaultPolicyPath // builtinDefaultPolicyPath is the policy path used for DefaultPolicy(). @@ -59,10 +59,16 @@ func DefaultPolicy(sys *types.SystemContext) (*Policy, error) { // defaultPolicyPath returns a path to the default policy of the system. func defaultPolicyPath(sys *types.SystemContext) string { + return defaultPolicyPathWithHomeDir(sys, homedir.Get()) +} + +// defaultPolicyPathWithHomeDir is an internal implementation detail of defaultPolicyPath, +// it exists only to allow testing it with an artificial home directory. +func defaultPolicyPathWithHomeDir(sys *types.SystemContext, homeDir string) string { if sys != nil && sys.SignaturePolicyPath != "" { return sys.SignaturePolicyPath } - userPolicyFilePath := filepath.Join(homedir.Get(), userPolicyFile) + userPolicyFilePath := filepath.Join(homeDir, userPolicyFile) if _, err := os.Stat(userPolicyFilePath); err == nil { return userPolicyFilePath } diff --git a/vendor/github.com/containers/image/v5/storage/storage_image.go b/vendor/github.com/containers/image/v5/storage/storage_image.go index d24f8bbee..924d684ae 100644 --- a/vendor/github.com/containers/image/v5/storage/storage_image.go +++ b/vendor/github.com/containers/image/v5/storage/storage_image.go @@ -463,7 +463,9 @@ func (s *storageImageDestination) PutBlob(ctx context.Context, stream io.Reader, // (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree). // info.Digest must not be empty. // If canSubstitute, TryReusingBlob can use an equivalent of the desired blob; in that case the returned info may not match the input. -// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size. +// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size, and may +// include CompressionOperation and CompressionAlgorithm fields to indicate that a change to the compression type should be +// reflected in the manifest that will be written. // If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure. // May use and/or update cache. 
func (s *storageImageDestination) TryReusingBlob(ctx context.Context, blobinfo types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) { diff --git a/vendor/github.com/containers/image/v5/tarball/doc.go b/vendor/github.com/containers/image/v5/tarball/doc.go index ead2d4263..e9d321b8f 100644 --- a/vendor/github.com/containers/image/v5/tarball/doc.go +++ b/vendor/github.com/containers/image/v5/tarball/doc.go @@ -5,11 +5,13 @@ // package main // // import ( -// "fmt" +// "context" // // cp "github.com/containers/image/v5/copy" +// "github.com/containers/image/v5/signature" // "github.com/containers/image/v5/tarball" // "github.com/containers/image/v5/transports/alltransports" +// "github.com/containers/image/v5/types" // imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" // ) // @@ -39,7 +41,18 @@ // if err != nil { // panic(err) // } -// err = cp.Image(nil, dest, src, nil) +// +// policy, err := signature.DefaultPolicy(nil) +// if err != nil { +// panic(err) +// } +// +// pc, err := signature.NewPolicyContext(policy) +// if err != nil { +// panic(err) +// } +// defer pc.Destroy() +// _, err = cp.Image(context.TODO(), pc, dest, src, nil) // if err != nil { // panic(err) // } diff --git a/vendor/github.com/containers/image/v5/types/types.go b/vendor/github.com/containers/image/v5/types/types.go index 3c5126b4e..8655ca443 100644 --- a/vendor/github.com/containers/image/v5/types/types.go +++ b/vendor/github.com/containers/image/v5/types/types.go @@ -126,14 +126,18 @@ type BlobInfo struct { Annotations map[string]string MediaType string // CompressionOperation is used in Image.UpdateLayerInfos to instruct - // whether the original layer should be preserved or (de)compressed. The - // field defaults to preserve the original layer. + // whether the original layer's "compressed or not" should be preserved, + // possibly while changing the compression algorithm from one to another, + // or if it should be compressed or decompressed. The field defaults to + // preserve the original layer's compressedness. // TODO: To remove together with CryptoOperation in re-design to remove // field out of BlobInfo. CompressionOperation LayerCompression // CompressionAlgorithm is used in Image.UpdateLayerInfos to set the correct // MIME type for compressed layers (e.g., gzip or zstd). This field MUST be - // set when `CompressionOperation == Compress`. + // set when `CompressionOperation == Compress` and MAY be set when + // `CompressionOperation == PreserveOriginal` and the compression type is + // being changed for an already-compressed layer. CompressionAlgorithm *compression.Algorithm // CryptoOperation is used in Image.UpdateLayerInfos to instruct // whether the original layer was encrypted/decrypted @@ -194,6 +198,9 @@ type BICReplacementCandidate struct { // // None of the methods return an error indication: errors when neither reading from, nor writing to, the cache, should be fatal; // users of the cache should just fall back to copying the blobs the usual way. +// +// The BlobInfoCache interface is deprecated. Consumers of this library should use one of the implementations provided by +// subpackages of the library's "pkg/blobinfocache" package in preference to implementing the interface on their own. type BlobInfoCache interface { // UncompressedDigest returns an uncompressed digest corresponding to anyDigest. // May return anyDigest if it is known to be uncompressed. @@ -306,7 +313,9 @@ type ImageDestination interface { // (e.g. 
if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree). // info.Digest must not be empty. // If canSubstitute, TryReusingBlob can use an equivalent of the desired blob; in that case the returned info may not match the input. - // If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size. + // If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size, and may + // include CompressionOperation and CompressionAlgorithm fields to indicate that a change to the compression type should be + // reflected in the manifest that will be written. // If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure. // May use and/or update cache. TryReusingBlob(ctx context.Context, info BlobInfo, cache BlobInfoCache, canSubstitute bool) (bool, BlobInfo, error) @@ -397,6 +406,12 @@ type Image interface { // UpdatedImage returns a types.Image modified according to options. // Everything in options.InformationOnly should be provided, other fields should be set only if a modification is desired. // This does not change the state of the original Image object. + // The returned error will be a manifest.ManifestLayerCompressionIncompatibilityError if + // manifests of type options.ManifestMIMEType can not include layers that are compressed + // in accordance with the CompressionOperation and CompressionAlgorithm specified in one + // or more options.LayerInfos items, though retrying with a different + // options.ManifestMIMEType or with different CompressionOperation+CompressionAlgorithm + // values might succeed. UpdatedImage(ctx context.Context, options ManifestUpdateOptions) (Image, error) // SupportsEncryption returns an indicator that the image supports encryption // @@ -600,6 +615,8 @@ type SystemContext struct { DockerDisableV1Ping bool // If true, dockerImageDestination.SupportedManifestMIMETypes will omit the Schema1 media types from the supported list DockerDisableDestSchema1MIMETypes bool + // If true, the physical pull source of docker transport images is logged at info level + DockerLogMirrorChoice bool // Directory to use for OSTree temporary files OSTreeTmpDirPath string diff --git a/vendor/github.com/containers/image/v5/version/version.go b/vendor/github.com/containers/image/v5/version/version.go index 48ecf938c..1fc775410 100644 --- a/vendor/github.com/containers/image/v5/version/version.go +++ b/vendor/github.com/containers/image/v5/version/version.go @@ -6,9 +6,9 @@ const ( // VersionMajor is for an API incompatible changes VersionMajor = 5 // VersionMinor is for functionality in a backwards-compatible manner - VersionMinor = 9 + VersionMinor = 10 // VersionPatch is for backwards-compatible bug fixes - VersionPatch = 0 + VersionPatch = 1 // VersionDev indicates development branch. Releases will be empty string. 
VersionDev = "" diff --git a/vendor/github.com/klauspost/compress/flate/gen_inflate.go b/vendor/github.com/klauspost/compress/flate/gen_inflate.go deleted file mode 100644 index 35fc072a3..000000000 --- a/vendor/github.com/klauspost/compress/flate/gen_inflate.go +++ /dev/null @@ -1,294 +0,0 @@ -// +build generate - -//go:generate go run $GOFILE && gofmt -w inflate_gen.go - -package main - -import ( - "os" - "strings" -) - -func main() { - f, err := os.Create("inflate_gen.go") - if err != nil { - panic(err) - } - defer f.Close() - types := []string{"*bytes.Buffer", "*bytes.Reader", "*bufio.Reader", "*strings.Reader"} - names := []string{"BytesBuffer", "BytesReader", "BufioReader", "StringsReader"} - imports := []string{"bytes", "bufio", "io", "strings", "math/bits"} - f.WriteString(`// Code generated by go generate gen_inflate.go. DO NOT EDIT. - -package flate - -import ( -`) - - for _, imp := range imports { - f.WriteString("\t\"" + imp + "\"\n") - } - f.WriteString(")\n\n") - - template := ` - -// Decode a single Huffman block from f. -// hl and hd are the Huffman states for the lit/length values -// and the distance values, respectively. If hd == nil, using the -// fixed distance encoding associated with fixed Huffman blocks. -func (f *decompressor) $FUNCNAME$() { - const ( - stateInit = iota // Zero value must be stateInit - stateDict - ) - fr := f.r.($TYPE$) - - switch f.stepState { - case stateInit: - goto readLiteral - case stateDict: - goto copyHistory - } - -readLiteral: - // Read literal and/or (length, distance) according to RFC section 3.2.3. - { - var v int - { - // Inlined v, err := f.huffSym(f.hl) - // Since a huffmanDecoder can be empty or be composed of a degenerate tree - // with single element, huffSym must error on these two edge cases. In both - // cases, the chunks slice will be 0 for the invalid sequence, leading it - // satisfy the n == 0 check below. - n := uint(f.hl.maxRead) - // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers, - // but is smart enough to keep local variables in registers, so use nb and b, - // inline call to moreBits and reassign b,nb back to f on return. 
- nb, b := f.nb, f.b - for { - for nb < n { - c, err := fr.ReadByte() - if err != nil { - f.b = b - f.nb = nb - f.err = noEOF(err) - return - } - f.roffset++ - b |= uint32(c) << (nb & regSizeMaskUint32) - nb += 8 - } - chunk := f.hl.chunks[b&(huffmanNumChunks-1)] - n = uint(chunk & huffmanCountMask) - if n > huffmanChunkBits { - chunk = f.hl.links[chunk>>huffmanValueShift][(b>>huffmanChunkBits)&f.hl.linkMask] - n = uint(chunk & huffmanCountMask) - } - if n <= nb { - if n == 0 { - f.b = b - f.nb = nb - if debugDecode { - fmt.Println("huffsym: n==0") - } - f.err = CorruptInputError(f.roffset) - return - } - f.b = b >> (n & regSizeMaskUint32) - f.nb = nb - n - v = int(chunk >> huffmanValueShift) - break - } - } - } - - var length int - switch { - case v < 256: - f.dict.writeByte(byte(v)) - if f.dict.availWrite() == 0 { - f.toRead = f.dict.readFlush() - f.step = (*decompressor).$FUNCNAME$ - f.stepState = stateInit - return - } - goto readLiteral - case v == 256: - f.finishBlock() - return - // otherwise, reference to older data - case v < 265: - length = v - (257 - 3) - case v < maxNumLit: - val := decCodeToLen[(v - 257)] - length = int(val.length) + 3 - n := uint(val.extra) - for f.nb < n { - c, err := fr.ReadByte() - if err != nil { - if debugDecode { - fmt.Println("morebits n>0:", err) - } - f.err = err - return - } - f.roffset++ - f.b |= uint32(c) << f.nb - f.nb += 8 - } - length += int(f.b & uint32(1<<(n&regSizeMaskUint32)-1)) - f.b >>= n & regSizeMaskUint32 - f.nb -= n - default: - if debugDecode { - fmt.Println(v, ">= maxNumLit") - } - f.err = CorruptInputError(f.roffset) - return - } - - var dist uint32 - if f.hd == nil { - for f.nb < 5 { - c, err := fr.ReadByte() - if err != nil { - if debugDecode { - fmt.Println("morebits f.nb<5:", err) - } - f.err = err - return - } - f.roffset++ - f.b |= uint32(c) << f.nb - f.nb += 8 - } - dist = uint32(bits.Reverse8(uint8(f.b & 0x1F << 3))) - f.b >>= 5 - f.nb -= 5 - } else { - // Since a huffmanDecoder can be empty or be composed of a degenerate tree - // with single element, huffSym must error on these two edge cases. In both - // cases, the chunks slice will be 0 for the invalid sequence, leading it - // satisfy the n == 0 check below. - n := uint(f.hd.maxRead) - // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers, - // but is smart enough to keep local variables in registers, so use nb and b, - // inline call to moreBits and reassign b,nb back to f on return. - nb, b := f.nb, f.b - for { - for nb < n { - c, err := fr.ReadByte() - if err != nil { - f.b = b - f.nb = nb - f.err = noEOF(err) - return - } - f.roffset++ - b |= uint32(c) << (nb & regSizeMaskUint32) - nb += 8 - } - chunk := f.hd.chunks[b&(huffmanNumChunks-1)] - n = uint(chunk & huffmanCountMask) - if n > huffmanChunkBits { - chunk = f.hd.links[chunk>>huffmanValueShift][(b>>huffmanChunkBits)&f.hd.linkMask] - n = uint(chunk & huffmanCountMask) - } - if n <= nb { - if n == 0 { - f.b = b - f.nb = nb - if debugDecode { - fmt.Println("huffsym: n==0") - } - f.err = CorruptInputError(f.roffset) - return - } - f.b = b >> (n & regSizeMaskUint32) - f.nb = nb - n - dist = uint32(chunk >> huffmanValueShift) - break - } - } - } - - switch { - case dist < 4: - dist++ - case dist < maxNumDist: - nb := uint(dist-2) >> 1 - // have 1 bit in bottom of dist, need nb more. 
- extra := (dist & 1) << (nb & regSizeMaskUint32) - for f.nb < nb { - c, err := fr.ReadByte() - if err != nil { - if debugDecode { - fmt.Println("morebits f.nb<nb:", err) - } - f.err = err - return - } - f.roffset++ - f.b |= uint32(c) << f.nb - f.nb += 8 - } - extra |= f.b & uint32(1<<(nb&regSizeMaskUint32)-1) - f.b >>= nb & regSizeMaskUint32 - f.nb -= nb - dist = 1<<((nb+1)&regSizeMaskUint32) + 1 + extra - default: - if debugDecode { - fmt.Println("dist too big:", dist, maxNumDist) - } - f.err = CorruptInputError(f.roffset) - return - } - - // No check on length; encoding can be prescient. - if dist > uint32(f.dict.histSize()) { - if debugDecode { - fmt.Println("dist > f.dict.histSize():", dist, f.dict.histSize()) - } - f.err = CorruptInputError(f.roffset) - return - } - - f.copyLen, f.copyDist = length, int(dist) - goto copyHistory - } - -copyHistory: - // Perform a backwards copy according to RFC section 3.2.3. - { - cnt := f.dict.tryWriteCopy(f.copyDist, f.copyLen) - if cnt == 0 { - cnt = f.dict.writeCopy(f.copyDist, f.copyLen) - } - f.copyLen -= cnt - - if f.dict.availWrite() == 0 || f.copyLen > 0 { - f.toRead = f.dict.readFlush() - f.step = (*decompressor).$FUNCNAME$ // We need to continue this work - f.stepState = stateDict - return - } - goto readLiteral - } -} - -` - for i, t := range types { - s := strings.Replace(template, "$FUNCNAME$", "huffman"+names[i], -1) - s = strings.Replace(s, "$TYPE$", t, -1) - f.WriteString(s) - } - f.WriteString("func (f *decompressor) huffmanBlockDecoder() func() {\n") - f.WriteString("\tswitch f.r.(type) {\n") - for i, t := range types { - f.WriteString("\t\tcase " + t + ":\n") - f.WriteString("\t\t\treturn f.huffman" + names[i] + "\n") - } - f.WriteString("\t\tdefault:\n") - f.WriteString("\t\t\treturn f.huffmanBlockGeneric") - f.WriteString("\t}\n}\n") -} diff --git a/vendor/github.com/klauspost/compress/huff0/README.md b/vendor/github.com/klauspost/compress/huff0/README.md index e12da4db2..8b6e5c663 100644 --- a/vendor/github.com/klauspost/compress/huff0/README.md +++ b/vendor/github.com/klauspost/compress/huff0/README.md @@ -14,7 +14,9 @@ but it can be used as a secondary step to compressors (like Snappy) that does no ## News
- * Mar 2018: First implementation released. Consider this beta software for now.
+This is used as part of the [zstandard](https://github.com/klauspost/compress/tree/master/zstd#zstd) compression and decompression package.
+
+This ensures that most functionality is well tested.
# Usage
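The zstd decoder change that follows replaces a hard type assertion on *bytes.Buffer with a small locally-defined interface, so any reader that can hand over its remaining bytes gets the synchronous small-input path. A standalone sketch of that pattern, assuming nothing beyond the standard library (the byter name and the 1<<20 threshold mirror the hunk below; describe is a hypothetical stand-in for Decoder.Reset):

```go
package main

import (
	"bytes"
	"fmt"
	"strings"
)

// byter mirrors the small interface the zstd hunk below introduces: any
// reader that can expose its remaining bytes without copying.
type byter interface {
	Bytes() []byte
	Len() int
}

// describe picks the synchronous path for small in-memory readers and
// falls back to streaming otherwise.
func describe(r interface{}) {
	if bb, ok := r.(byter); ok && bb.Len() < 1<<20 {
		fmt.Printf("sync decode of %d in-memory bytes\n", len(bb.Bytes()))
		return
	}
	fmt.Println("streaming decode")
}

func main() {
	describe(bytes.NewBufferString("hello")) // *bytes.Buffer has Bytes() and Len()
	describe(strings.NewReader("hello"))     // has Len() but no Bytes(): streams
}
```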
diff --git a/vendor/github.com/klauspost/compress/zstd/decoder.go b/vendor/github.com/klauspost/compress/zstd/decoder.go index 62fd37324..1d41c25d2 100644 --- a/vendor/github.com/klauspost/compress/zstd/decoder.go +++ b/vendor/github.com/klauspost/compress/zstd/decoder.go @@ -5,7 +5,6 @@ package zstd import ( - "bytes" "errors" "io" "sync" @@ -179,11 +178,13 @@ func (d *Decoder) Reset(r io.Reader) error { } // If bytes buffer and < 1MB, do sync decoding anyway. - if bb, ok := r.(*bytes.Buffer); ok && bb.Len() < 1<<20 { + if bb, ok := r.(byter); ok && bb.Len() < 1<<20 { + var bb2 byter + bb2 = bb if debug { println("*bytes.Buffer detected, doing sync decode, len:", bb.Len()) } - b := bb.Bytes() + b := bb2.Bytes() var dst []byte if cap(d.current.b) > 0 { dst = d.current.b diff --git a/vendor/github.com/klauspost/compress/zstd/zstd.go b/vendor/github.com/klauspost/compress/zstd/zstd.go index 0c761dd62..9056beef2 100644 --- a/vendor/github.com/klauspost/compress/zstd/zstd.go +++ b/vendor/github.com/klauspost/compress/zstd/zstd.go @@ -4,6 +4,7 @@ package zstd import ( + "bytes" "errors" "log" "math" @@ -146,3 +147,10 @@ func load64(b []byte, i int) uint64 { return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 } + +type byter interface { + Bytes() []byte + Len() int +} + +var _ byter = &bytes.Buffer{} diff --git a/vendor/github.com/ulikunitz/xz/SECURITY.md b/vendor/github.com/ulikunitz/xz/SECURITY.md new file mode 100644 index 000000000..5f7ec01b3 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/SECURITY.md @@ -0,0 +1,10 @@ +# Security Policy + +## Supported Versions + +Currently the last minor version v0.5.x is supported. + +## Reporting a Vulnerability + +Report a vulnerability by creating a GitHub issue at +<https://github.com/ulikunitz/xz/issues>. Expect a response in a week. diff --git a/vendor/github.com/ulikunitz/xz/TODO.md b/vendor/github.com/ulikunitz/xz/TODO.md index 84bd5dcbd..88c7341c8 100644 --- a/vendor/github.com/ulikunitz/xz/TODO.md +++ b/vendor/github.com/ulikunitz/xz/TODO.md @@ -8,19 +8,17 @@ 1. Review encoder and check for lzma improvements under xz. 2. Fix binary tree matcher. -3. Compare compression ratio with xz tool using comparable parameters - and optimize parameters -4. Do some optimizations - - rename operation action and make it a simple type of size 8 - - make maxMatches, wordSize parameters - - stop searching after a certain length is found (parameter sweetLen) +3. Compare compression ratio with xz tool using comparable parameters and optimize parameters +4. rename operation action and make it a simple type of size 8 +5. make maxMatches, wordSize parameters +6. stop searching after a certain length is found (parameter sweetLen) ## Release v0.7 1. Optimize code 2. Do statistical analysis to get linear presets. 3. Test sync.Pool compatibility for xz and lzma Writer and Reader -3. Fuzz optimized code. +4. Fuzz optimized code. 
## Release v0.8 @@ -44,53 +42,73 @@ ## Package lzma -### Release v0.6 - -- Rewrite Encoder into a simple greedy one-op-at-a-time encoder - including - + simple scan at the dictionary head for the same byte - + use the killer byte (requiring matches to get longer, the first - test should be the byte that would make the match longer) +### v0.6 +* Rewrite Encoder into a simple greedy one-op-at-a-time encoder including + * simple scan at the dictionary head for the same byte + * use the killer byte (requiring matches to get longer, the first test should be the byte that would make the match longer) ## Optimizations -- There may be a lot of false sharing in lzma.State; check whether this - can be improved by reorganizing the internal structure of it. -- Check whether batching encoding and decoding improves speed. +* There may be a lot of false sharing in lzma. State; check whether this can be improved by reorganizing the internal structure of it. + +* Check whether batching encoding and decoding improves speed. ### DAG optimizations -- Use full buffer to create minimal bit-length above range encoder. -- Might be too slow (see v0.4) +* Use full buffer to create minimal bit-length above range encoder. +* Might be too slow (see v0.4) ### Different match finders -- hashes with 2, 3 characters additional to 4 characters -- binary trees with 2-7 characters (uint64 as key, use uint32 as +* hashes with 2, 3 characters additional to 4 characters +* binary trees with 2-7 characters (uint64 as key, use uint32 as + pointers into an array) -- rb-trees with 2-7 characters (uint64 as key, use uint32 as pointers + +* rb-trees with 2-7 characters (uint64 as key, use uint32 as pointers + into an array with bit-stealing for the colors) ## Release Procedure -- execute goch -l for all packages; probably with lower param like 0.5. -- check orthography with gospell -- Write release notes in doc/relnotes. -- Update README.md -- xb copyright . in xz directory to ensure all new files have Copyright - header -- VERSION=<version> go generate github.com/ulikunitz/xz/... to update - version files -- Execute test for Linux/amd64, Linux/x86 and Windows/amd64. -- Update TODO.md - write short log entry -- git checkout master && git merge dev -- git tag -a <version> -- git push +* execute goch -l for all packages; probably with lower param like 0.5. +* check orthography with gospell +* Write release notes in doc/relnotes. +* Update README.md +* xb copyright . in xz directory to ensure all new files have Copyright header +* `VERSION=<version> go generate github.com/ulikunitz/xz/...` to update version files +* Execute test for Linux/amd64, Linux/x86 and Windows/amd64. +* Update TODO.md - write short log entry +* `git checkout master && git merge dev` +* `git tag -a <version>` +* `git push` ## Log -## 2020-08-19 +### 2020-12-17 + +Release v0.5.9 fixes warnings, a typo and adds SECURITY.md. + +One fix is interesting. + +```go +const ( + a byte = 0x1 + b = 0x2 +) +``` + +The constants a and b don't have the same type. Correct is + +```go +const ( + a byte = 0x1 + b byte = 0x2 +) +``` + +### 2020-08-19 Release v0.5.8 fixes issue [issue #35](https://github.com/ulikunitz/xz/issues/35). @@ -208,8 +226,8 @@ MININT. ### 2015-06-04 -It has been a productive day. I improved the interface of lzma.Reader -and lzma.Writer and fixed the error handling. +It has been a productive day. I improved the interface of lzma. Reader +and lzma. Writer and fixed the error handling. ### 2015-06-01 @@ -260,7 +278,7 @@ needed anymore. 
However I will implement a ReaderState and WriterState type to use static typing to ensure the right State object is combined with the -right lzbase.Reader and lzbase.Writer. +right lzbase. Reader and lzbase. Writer. As a start I have implemented ReaderState and WriterState to ensure that the state for reading is only used by readers and WriterState only @@ -282,11 +300,11 @@ old lzma package has been completely removed. ### 2015-04-05 -Implemented lzma.Reader and tested it. +Implemented lzma. Reader and tested it. ### 2015-04-04 -Implemented baseReader by adapting code form lzma.Reader. +Implemented baseReader by adapting code from lzma. Reader. ### 2015-04-03 @@ -302,7 +320,7 @@ However in Francesco Campoy's presentation "Go for Javaneros (Javaïstes?)" is the idea that using an embedded field E, all the methods of E will be defined on T. If E is an interface T satisfies E. -https://talks.golang.org/2014/go4java.slide#51 +<https://talks.golang.org/2014/go4java.slide#51> I have never used this, but it seems to be a cool idea. @@ -327,11 +345,11 @@ and the opCodec. 1. Implemented simple lzmago tool 2. Tested tool against large 4.4G file - - compression worked correctly; tested decompression with lzma - - decompression hits a full buffer condition + * compression worked correctly; tested decompression with lzma + * decompression hits a full buffer condition 3. Fixed a bug in the compressor and wrote a test for it 4. Executed full cycle for 4.4 GB file; performance can be improved ;-) ### 2015-01-11 -- Release v0.2 because of the working LZMA encoder and decoder +* Release v0.2 because of the working LZMA encoder and decoder diff --git a/vendor/github.com/ulikunitz/xz/format.go b/vendor/github.com/ulikunitz/xz/format.go index edfec9a94..84b58c9dd 100644 --- a/vendor/github.com/ulikunitz/xz/format.go +++ b/vendor/github.com/ulikunitz/xz/format.go @@ -47,9 +47,9 @@ const HeaderLen = 12 // Constants for the checksum methods supported by xz. const ( None byte = 0x0 - CRC32 = 0x1 - CRC64 = 0x4 - SHA256 = 0xa + CRC32 byte = 0x1 + CRC64 byte = 0x4 + SHA256 byte = 0xa ) // errInvalidFlags indicates that flags are invalid. @@ -569,22 +569,6 @@ func readFilters(r io.Reader, count int) (filters []filter, err error) { return []filter{f}, err } -// writeFilters writes the filters. -func writeFilters(w io.Writer, filters []filter) (n int, err error) { - for _, f := range filters { - p, err := f.MarshalBinary() - if err != nil { - return n, err - } - k, err := w.Write(p) - n += k - if err != nil { - return n, err - } - } - return n, nil -} - /*** Index ***/ // record describes a block in the xz file index. diff --git a/vendor/github.com/ulikunitz/xz/lzma/bintree.go b/vendor/github.com/ulikunitz/xz/lzma/bintree.go index 58d6a92a7..527ea19a7 100644 --- a/vendor/github.com/ulikunitz/xz/lzma/bintree.go +++ b/vendor/github.com/ulikunitz/xz/lzma/bintree.go @@ -5,10 +5,7 @@ package lzma import ( - "bufio" "errors" - "fmt" - "io" "unicode" ) @@ -349,6 +346,7 @@ func dumpX(x uint32) string { return string(a) } +/* // dumpNode writes a representation of the node v into the io.Writer. 
func (t *binTree) dumpNode(w io.Writer, v uint32, indent int) { if v == null { @@ -377,6 +375,7 @@ func (t *binTree) dump(w io.Writer) error { t.dumpNode(bw, t.root, 0) return bw.Flush() } +*/ func (t *binTree) distance(v uint32) int { dist := int(t.front) - int(v) diff --git a/vendor/github.com/ulikunitz/xz/lzma/bitops.go b/vendor/github.com/ulikunitz/xz/lzma/bitops.go index 2784ec6ba..d4309f97e 100644 --- a/vendor/github.com/ulikunitz/xz/lzma/bitops.go +++ b/vendor/github.com/ulikunitz/xz/lzma/bitops.go @@ -18,6 +18,7 @@ var ntz32Table = [32]int8{ 30, 17, 8, 14, 29, 13, 28, 27, } +/* // ntz32 computes the number of trailing zeros for an unsigned 32-bit integer. func ntz32(x uint32) int { if x == 0 { @@ -26,6 +27,7 @@ func ntz32(x uint32) int { x = (x & -x) * ntz32Const return int(ntz32Table[x>>27]) } +*/ // nlz32 computes the number of leading zeros for an unsigned 32-bit integer. func nlz32(x uint32) int { diff --git a/vendor/github.com/ulikunitz/xz/lzma/decoder.go b/vendor/github.com/ulikunitz/xz/lzma/decoder.go index e5a760a50..4b820792a 100644 --- a/vendor/github.com/ulikunitz/xz/lzma/decoder.go +++ b/vendor/github.com/ulikunitz/xz/lzma/decoder.go @@ -200,7 +200,7 @@ func (d *decoder) decompress() error { op, err := d.readOp() switch err { case nil: - break + // break case errEOS: d.eos = true if !d.rd.possiblyAtEnd() { diff --git a/vendor/github.com/ulikunitz/xz/lzma/decoderdict.go b/vendor/github.com/ulikunitz/xz/lzma/decoderdict.go index ba06712b0..dd44e6625 100644 --- a/vendor/github.com/ulikunitz/xz/lzma/decoderdict.go +++ b/vendor/github.com/ulikunitz/xz/lzma/decoderdict.go @@ -126,10 +126,3 @@ func (d *decoderDict) Available() int { return d.buf.Available() } // Read reads data from the buffer contained in the decoder dictionary. func (d *decoderDict) Read(p []byte) (n int, err error) { return d.buf.Read(p) } - -// Buffered returns the number of bytes currently buffered in the -// decoder dictionary. -func (d *decoderDict) buffered() int { return d.buf.Buffered() } - -// Peek gets data from the buffer without advancing the rear index. -func (d *decoderDict) peek(p []byte) (n int, err error) { return d.buf.Peek(p) } diff --git a/vendor/github.com/ulikunitz/xz/lzma/directcodec.go b/vendor/github.com/ulikunitz/xz/lzma/directcodec.go index e6e0c6ddf..064642831 100644 --- a/vendor/github.com/ulikunitz/xz/lzma/directcodec.go +++ b/vendor/github.com/ulikunitz/xz/lzma/directcodec.go @@ -4,21 +4,10 @@ package lzma -import "fmt" - // directCodec allows the encoding and decoding of values with a fixed number // of bits. The number of bits must be in the range [1,32]. type directCodec byte -// makeDirectCodec creates a directCodec. The function panics if the number of -// bits is not in the range [1,32]. -func makeDirectCodec(bits int) directCodec { - if !(1 <= bits && bits <= 32) { - panic(fmt.Errorf("bits=%d out of range", bits)) - } - return directCodec(bits) -} - // Bits returns the number of bits supported by this codec. func (dc directCodec) Bits() int { return int(dc) diff --git a/vendor/github.com/ulikunitz/xz/lzma/distcodec.go b/vendor/github.com/ulikunitz/xz/lzma/distcodec.go index 69871c04a..9ed486d27 100644 --- a/vendor/github.com/ulikunitz/xz/lzma/distcodec.go +++ b/vendor/github.com/ulikunitz/xz/lzma/distcodec.go @@ -20,8 +20,6 @@ const ( posSlotBits = 6 // number of align bits alignBits = 4 - // maximum position slot - maxPosSlot = 63 ) // distCodec provides encoding and decoding of distance values. 
@@ -45,20 +43,6 @@ func (dc *distCodec) deepcopy(src *distCodec) { dc.alignCodec.deepcopy(&src.alignCodec) } -// distBits returns the number of bits required to encode dist. -func distBits(dist uint32) int { - if dist < startPosModel { - return 6 - } - // slot s > 3, dist d - // s = 2(bits(d)-1) + bit(d, bits(d)-2) - // s>>1 = bits(d)-1 - // bits(d) = 32-nlz32(d) - // s>>1=31-nlz32(d) - // n = 5 + (s>>1) = 36 - nlz32(d) - return 36 - nlz32(dist) -} - // newDistCodec creates a new distance codec. func (dc *distCodec) init() { for i := range dc.posSlotCodecs { diff --git a/vendor/github.com/ulikunitz/xz/lzma/encoderdict.go b/vendor/github.com/ulikunitz/xz/lzma/encoderdict.go index 40f3d3f64..c36308d7c 100644 --- a/vendor/github.com/ulikunitz/xz/lzma/encoderdict.go +++ b/vendor/github.com/ulikunitz/xz/lzma/encoderdict.go @@ -19,7 +19,7 @@ type matcher interface { } // encoderDict provides the dictionary of the encoder. It includes an -// addtional buffer atop of the actual dictionary. +// additional buffer atop of the actual dictionary. type encoderDict struct { buf buffer m matcher diff --git a/vendor/github.com/ulikunitz/xz/lzma/header2.go b/vendor/github.com/ulikunitz/xz/lzma/header2.go index cd148812c..ffeca35c3 100644 --- a/vendor/github.com/ulikunitz/xz/lzma/header2.go +++ b/vendor/github.com/ulikunitz/xz/lzma/header2.go @@ -264,7 +264,7 @@ type chunkState byte // state const ( start chunkState = 'S' - stop = 'T' + stop chunkState = 'T' ) // errors for the chunk state handling diff --git a/vendor/github.com/ulikunitz/xz/lzma/lengthcodec.go b/vendor/github.com/ulikunitz/xz/lzma/lengthcodec.go index 927395bd8..35b064064 100644 --- a/vendor/github.com/ulikunitz/xz/lzma/lengthcodec.go +++ b/vendor/github.com/ulikunitz/xz/lzma/lengthcodec.go @@ -56,19 +56,6 @@ func (lc *lengthCodec) init() { lc.high = makeTreeCodec(8) } -// lBits gives the number of bits used for the encoding of the l value -// provided to the range encoder. -func lBits(l uint32) int { - switch { - case l < 8: - return 4 - case l < 16: - return 5 - default: - return 10 - } -} - // Encode encodes the length offset. The length offset l can be compute by // subtracting minMatchLen (2) from the actual length. // diff --git a/vendor/github.com/ulikunitz/xz/lzma/literalcodec.go b/vendor/github.com/ulikunitz/xz/lzma/literalcodec.go index ca31530fd..7b1ad1d9b 100644 --- a/vendor/github.com/ulikunitz/xz/lzma/literalcodec.go +++ b/vendor/github.com/ulikunitz/xz/lzma/literalcodec.go @@ -123,10 +123,3 @@ const ( minLP = 0 maxLP = 4 ) - -// minState and maxState define a range for the state values stored in -// the State values. -const ( - minState = 0 - maxState = 11 -) diff --git a/vendor/github.com/ulikunitz/xz/lzma/operation.go b/vendor/github.com/ulikunitz/xz/lzma/operation.go index a75c9b46c..2f9b78ea5 100644 --- a/vendor/github.com/ulikunitz/xz/lzma/operation.go +++ b/vendor/github.com/ulikunitz/xz/lzma/operation.go @@ -5,7 +5,6 @@ package lzma import ( - "errors" "fmt" "unicode" ) @@ -24,30 +23,6 @@ type match struct { n int } -// verify checks whether the match is valid. If that is not the case an -// error is returned. -func (m match) verify() error { - if !(minDistance <= m.distance && m.distance <= maxDistance) { - return errors.New("distance out of range") - } - if !(1 <= m.n && m.n <= maxMatchLen) { - return errors.New("length out of range") - } - return nil -} - -// l return the l-value for the match, which is the difference of length -// n and 2. 
-func (m match) l() uint32 { - return uint32(m.n - minMatchLen) -} - -// dist returns the dist value for the match, which is one less of the -// distance stored in the match. -func (m match) dist() uint32 { - return uint32(m.distance - minDistance) -} - // Len returns the number of bytes matched. func (m match) Len() int { return m.n diff --git a/vendor/github.com/ulikunitz/xz/lzma/rangecodec.go b/vendor/github.com/ulikunitz/xz/lzma/rangecodec.go index 7189a0377..7b299abfe 100644 --- a/vendor/github.com/ulikunitz/xz/lzma/rangecodec.go +++ b/vendor/github.com/ulikunitz/xz/lzma/rangecodec.go @@ -131,32 +131,6 @@ type rangeDecoder struct { code uint32 } -// init initializes the range decoder, by reading from the byte reader. -func (d *rangeDecoder) init() error { - d.nrange = 0xffffffff - d.code = 0 - - b, err := d.br.ReadByte() - if err != nil { - return err - } - if b != 0 { - return errors.New("newRangeDecoder: first byte not zero") - } - - for i := 0; i < 4; i++ { - if err = d.updateCode(); err != nil { - return err - } - } - - if d.code >= d.nrange { - return errors.New("newRangeDecoder: d.code >= d.nrange") - } - - return nil -} - // newRangeDecoder initializes a range decoder. It reads five bytes from the // reader and therefore may return an error. func newRangeDecoder(br io.ByteReader) (d *rangeDecoder, err error) { diff --git a/vendor/github.com/ulikunitz/xz/lzma/reader2.go b/vendor/github.com/ulikunitz/xz/lzma/reader2.go index 33074e624..e34c23f9c 100644 --- a/vendor/github.com/ulikunitz/xz/lzma/reader2.go +++ b/vendor/github.com/ulikunitz/xz/lzma/reader2.go @@ -48,7 +48,6 @@ type Reader2 struct { chunkReader io.Reader cstate chunkState - ctype chunkType } // NewReader2 creates a reader for an LZMA2 chunk sequence. diff --git a/vendor/github.com/ulikunitz/xz/lzma/state.go b/vendor/github.com/ulikunitz/xz/lzma/state.go index 03f061cf1..fbe3a3942 100644 --- a/vendor/github.com/ulikunitz/xz/lzma/state.go +++ b/vendor/github.com/ulikunitz/xz/lzma/state.go @@ -53,12 +53,6 @@ func (s *state) Reset() { s.distCodec.init() } -// initState initializes the state. -func initState(s *state, p Properties) { - *s = state{Properties: p} - s.Reset() -} - // newState creates a new state from the give Properties. func newState(p Properties) *state { s := &state{Properties: p} diff --git a/vendor/github.com/ulikunitz/xz/reader.go b/vendor/github.com/ulikunitz/xz/reader.go index 22cd6d500..795858914 100644 --- a/vendor/github.com/ulikunitz/xz/reader.go +++ b/vendor/github.com/ulikunitz/xz/reader.go @@ -26,13 +26,6 @@ type ReaderConfig struct { SingleStream bool } -// fill replaces all zero values with their default values. -func (c *ReaderConfig) fill() { - if c.DictCap == 0 { - c.DictCap = 8 * 1024 * 1024 - } -} - // Verify checks the reader parameters for Validity. Zero values will be // replaced by default values. func (c *ReaderConfig) Verify() error { @@ -165,9 +158,6 @@ func (c ReaderConfig) newStreamReader(xz io.Reader) (r *streamReader, err error) return r, nil } -// errIndex indicates an error with the xz file index. -var errIndex = errors.New("xz: error in xz file index") - // readTail reads the index body and the xz footer. func (r *streamReader) readTail() error { index, n, err := readIndexBody(r.xz) @@ -265,7 +255,6 @@ type blockReader struct { n int64 hash hash.Hash r io.Reader - err error } // newBlockReader creates a new block reader. 
@@ -315,10 +304,6 @@ func (br *blockReader) record() record { return record{br.unpaddedSize(), br.uncompressedSize()} } -// errBlockSize indicates that the size of the block in the block header -// is wrong. -var errBlockSize = errors.New("xz: wrong uncompressed size for block") - // Read reads data from the block. func (br *blockReader) Read(p []byte) (n int, err error) { n, err = br.r.Read(p) diff --git a/vendor/github.com/ulikunitz/xz/writer.go b/vendor/github.com/ulikunitz/xz/writer.go index aec10dfa6..a9ed44912 100644 --- a/vendor/github.com/ulikunitz/xz/writer.go +++ b/vendor/github.com/ulikunitz/xz/writer.go @@ -6,6 +6,7 @@ package xz import ( "errors" + "fmt" "hash" "io" @@ -190,6 +191,9 @@ func (c WriterConfig) NewWriter(xz io.Writer) (w *Writer, err error) { return nil, err } data, err := w.h.MarshalBinary() + if err != nil { + return nil, fmt.Errorf("w.h.MarshalBinary(): error %w", err) + } if _, err = xz.Write(data); err != nil { return nil, err } diff --git a/vendor/github.com/vbauerster/mpb/v5/.travis.yml b/vendor/github.com/vbauerster/mpb/v5/.travis.yml index 0eb0f2f20..9a203a67d 100644 --- a/vendor/github.com/vbauerster/mpb/v5/.travis.yml +++ b/vendor/github.com/vbauerster/mpb/v5/.travis.yml @@ -1,4 +1,7 @@ language: go +arch: + - amd64 + - ppc64le go: - 1.14.x diff --git a/vendor/github.com/vbauerster/mpb/v5/bar_option.go b/vendor/github.com/vbauerster/mpb/v5/bar_option.go index 31b7939b0..e7d2e41f9 100644 --- a/vendor/github.com/vbauerster/mpb/v5/bar_option.go +++ b/vendor/github.com/vbauerster/mpb/v5/bar_option.go @@ -123,13 +123,20 @@ func makeExtFunc(filler BarFiller) extFunc { } } -// TrimSpace trims bar's edge spaces. -func TrimSpace() BarOption { +// BarFillerTrim bar filler is rendered with leading and trailing space +// like ' [===] ' by default. With this option leading and trailing +// space will be removed. +func BarFillerTrim() BarOption { return func(s *bState) { s.trimSpace = true } } +// TrimSpace is an alias to BarFillerTrim. +func TrimSpace() BarOption { + return BarFillerTrim() +} + // BarStyle overrides mpb.DefaultBarStyle which is "[=>-]<+". // It's ok to pass string containing just 5 runes, for example "╢▌▌░╟", // if you don't need to override '<' (reverse tip) and '+' (refill rune). 
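The bar_option.go hunk above renames TrimSpace to BarFillerTrim and keeps the old name as an alias. A minimal usage sketch, assuming the mpb v5 API as vendored here (AddBar, Increment, Wait):

```go
package main

import (
	"time"

	"github.com/vbauerster/mpb/v5"
)

func main() {
	p := mpb.New()
	// BarFillerTrim drops the leading/trailing space around the filler,
	// rendering "[===]" instead of the default " [===] ".
	bar := p.AddBar(100, mpb.BarFillerTrim())
	for i := 0; i < 100; i++ {
		bar.Increment()
		time.Sleep(5 * time.Millisecond)
	}
	p.Wait()
}
```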
diff --git a/vendor/github.com/vbauerster/mpb/v5/go.mod b/vendor/github.com/vbauerster/mpb/v5/go.mod index 642bf0a5a..e80d1a10d 100644 --- a/vendor/github.com/vbauerster/mpb/v5/go.mod +++ b/vendor/github.com/vbauerster/mpb/v5/go.mod @@ -4,7 +4,7 @@ require ( github.com/VividCortex/ewma v1.1.1 github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d github.com/mattn/go-runewidth v0.0.9 - golang.org/x/sys v0.0.0-20200810151505-1b9f1253b3ed + golang.org/x/sys v0.0.0-20201218084310-7d0127a74742 ) go 1.14 diff --git a/vendor/github.com/vbauerster/mpb/v5/go.sum b/vendor/github.com/vbauerster/mpb/v5/go.sum index 7ad08f141..62cc10af0 100644 --- a/vendor/github.com/vbauerster/mpb/v5/go.sum +++ b/vendor/github.com/vbauerster/mpb/v5/go.sum @@ -4,5 +4,5 @@ github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d h1:licZJFw2RwpH github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d/go.mod h1:asat636LX7Bqt5lYEZ27JNDcqxfjdBQuJ/MM4CN/Lzo= github.com/mattn/go-runewidth v0.0.9 h1:Lm995f3rfxdpd6TSmuVCHVb/QhupuXlYr8sCI/QdE+0= github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= -golang.org/x/sys v0.0.0-20200810151505-1b9f1253b3ed h1:WBkVNH1zd9jg/dK4HCM4lNANnmd12EHC9z+LmcCG4ns= -golang.org/x/sys v0.0.0-20200810151505-1b9f1253b3ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201218084310-7d0127a74742 h1:+CBz4km/0KPU3RGTwARGh/noP3bEwtHcq+0YcBQM2JQ= +golang.org/x/sys v0.0.0-20201218084310-7d0127a74742/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= diff --git a/vendor/github.com/vbauerster/mpb/v5/progress.go b/vendor/github.com/vbauerster/mpb/v5/progress.go index ac1ce50ab..fb66ce05d 100644 --- a/vendor/github.com/vbauerster/mpb/v5/progress.go +++ b/vendor/github.com/vbauerster/mpb/v5/progress.go @@ -8,6 +8,7 @@ import ( "io" "io/ioutil" "log" + "math" "os" "sync" "time" @@ -40,7 +41,6 @@ type pState struct { pMatrix map[int][]chan int aMatrix map[int][]chan int barShutdownQueue []*Bar - barPopQueue []*Bar // following are provided/overridden by user idCount int @@ -179,7 +179,7 @@ func (p *Progress) BarCount() int { } } -// Wait waits far all bars to complete and finally shutdowns container. +// Wait waits for all bars to complete and finally shuts down the container. // After this method has been called, there is no way to reuse *Progress // instance.
func (p *Progress) Wait() { @@ -301,27 +301,18 @@ func (s *pState) flush(cw *cwriter.Writer) error { delete(s.parkedBars, b) b.toDrop = true } + if s.popCompleted && !b.noPop { + lineCount -= b.extendedLines + 1 + b.toDrop = true + } if b.toDrop { delete(bm, b) s.heapUpdated = true - } else if s.popCompleted { - if b := b; !b.noPop { - defer func() { - s.barPopQueue = append(s.barPopQueue, b) - }() - } } b.cancel() } s.barShutdownQueue = s.barShutdownQueue[0:0] - for _, b := range s.barPopQueue { - delete(bm, b) - s.heapUpdated = true - lineCount -= b.extendedLines + 1 - } - s.barPopQueue = s.barPopQueue[0:0] - for b := range bm { heap.Push(&s.bHeap, b) } @@ -370,7 +361,7 @@ func (s *pState) makeBarState(total int64, filler BarFiller, options ...BarOptio } if s.popCompleted && !bs.noPop { - bs.priority = -1 + bs.priority = -(math.MaxInt32 - s.idCount) } bs.bufP = bytes.NewBuffer(make([]byte, 0, 128)) @@ -382,17 +373,18 @@ func (s *pState) makeBarState(total int64, filler BarFiller, options ...BarOptio func syncWidth(matrix map[int][]chan int) { for _, column := range matrix { - column := column - go func() { - var maxWidth int - for _, ch := range column { - if w := <-ch; w > maxWidth { - maxWidth = w - } - } - for _, ch := range column { - ch <- maxWidth - } - }() + go maxWidthDistributor(column) + } +} + +var maxWidthDistributor = func(column []chan int) { + var maxWidth int + for _, ch := range column { + if w := <-ch; w > maxWidth { + maxWidth = w + } + } + for _, ch := range column { + ch <- maxWidth } } diff --git a/vendor/modules.txt b/vendor/modules.txt index 397ab70be..b55515db1 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -70,7 +70,7 @@ github.com/containernetworking/plugins/pkg/utils/hwaddr github.com/containernetworking/plugins/pkg/utils/sysctl github.com/containernetworking/plugins/plugins/ipam/host-local/backend github.com/containernetworking/plugins/plugins/ipam/host-local/backend/allocator -# github.com/containers/buildah v1.19.2 +# github.com/containers/buildah v1.19.3 github.com/containers/buildah github.com/containers/buildah/bind github.com/containers/buildah/chroot @@ -108,7 +108,7 @@ github.com/containers/common/pkg/umask github.com/containers/common/version # github.com/containers/conmon v2.0.20+incompatible github.com/containers/conmon/runner/config -# github.com/containers/image/v5 v5.9.0 +# github.com/containers/image/v5 v5.10.1 github.com/containers/image/v5/copy github.com/containers/image/v5/directory github.com/containers/image/v5/directory/explicitfilepath @@ -120,6 +120,7 @@ github.com/containers/image/v5/docker/policyconfiguration github.com/containers/image/v5/docker/reference github.com/containers/image/v5/docker/tarfile github.com/containers/image/v5/image +github.com/containers/image/v5/internal/blobinfocache github.com/containers/image/v5/internal/iolimits github.com/containers/image/v5/internal/pkg/keyctl github.com/containers/image/v5/internal/pkg/platform @@ -348,7 +349,7 @@ github.com/json-iterator/go # github.com/juju/ansiterm v0.0.0-20180109212912-720a0952cc2a github.com/juju/ansiterm github.com/juju/ansiterm/tabwriter -# github.com/klauspost/compress v1.11.5 +# github.com/klauspost/compress v1.11.7 github.com/klauspost/compress/flate github.com/klauspost/compress/fse github.com/klauspost/compress/huff0 @@ -553,7 +554,7 @@ github.com/uber/jaeger-client-go/transport github.com/uber/jaeger-client-go/utils # github.com/uber/jaeger-lib v2.2.0+incompatible github.com/uber/jaeger-lib/metrics -# github.com/ulikunitz/xz v0.5.8 +# 
github.com/ulikunitz/xz v0.5.9 github.com/ulikunitz/xz github.com/ulikunitz/xz/internal/hash github.com/ulikunitz/xz/internal/xlog @@ -562,7 +563,7 @@ github.com/ulikunitz/xz/lzma github.com/vbatts/tar-split/archive/tar github.com/vbatts/tar-split/tar/asm github.com/vbatts/tar-split/tar/storage -# github.com/vbauerster/mpb/v5 v5.3.0 +# github.com/vbauerster/mpb/v5 v5.4.0 github.com/vbauerster/mpb/v5 github.com/vbauerster/mpb/v5/cwriter github.com/vbauerster/mpb/v5/decor
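Taken together, the mpb v5.4.0 bump renames TrimSpace to BarFillerTrim (keeping the old name as an alias), replaces the pop queue with priority-based ordering for popped bars, and tightens the Wait documentation. A hedged usage sketch of the v5 API as vendored here; the bar total and the decor.Name/decor.Percentage decorators are illustrative choices, not taken from Podman's own progress code:

```
// Minimal mpb v5 usage: one bar, trimmed filler, Wait for shutdown.
package main

import (
	"time"

	"github.com/vbauerster/mpb/v5"
	"github.com/vbauerster/mpb/v5/decor"
)

func main() {
	p := mpb.New()
	bar := p.AddBar(100,
		mpb.BarFillerTrim(), // render "[===]" without the surrounding spaces
		mpb.PrependDecorators(decor.Name("copying")),
		mpb.AppendDecorators(decor.Percentage()),
	)
	for i := 0; i < 100; i++ {
		bar.Increment()
		time.Sleep(5 * time.Millisecond)
	}
	p.Wait() // blocks until all bars complete, then shuts the container down
}
```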