201 files changed, 3551 insertions, 1782 deletions
diff --git a/.cirrus.yml b/.cirrus.yml index 5a4815e1c..eda03bf23 100644 --- a/.cirrus.yml +++ b/.cirrus.yml @@ -46,7 +46,7 @@ env: #### Control variables that determine what to run and how to run it. #### N/B: Required ALL of these are set for every single task. #### - TEST_FLAVOR: # int, sys, ext_svc, smoke, automation, etc. + TEST_FLAVOR: # int, sys, ext_svc, validate, automation, etc. TEST_ENVIRON: host # 'host' or 'container' PODBIN_NAME: podman # 'podman' or 'remote' PRIV_NAME: root # 'root' or 'rootless' @@ -78,6 +78,25 @@ ext_svc_check_task: env: TEST_FLAVOR: ext_svc CTR_FQIN: ${FEDORA_CONTAINER_FQIN} + # NOTE: The default way Cirrus-CI clones is *NOT* compatible with + # environment expectations in contrib/cirrus/lib.sh. Specifically + # the 'origin' remote must be defined, and all remote branches/tags + # must be available for reference from CI scripts. + clone_script: &full_clone | + cd / + rm -rf $CIRRUS_WORKING_DIR + mkdir -p $CIRRUS_WORKING_DIR + git clone --recursive --branch=$DEST_BRANCH https://x-access-token:${CIRRUS_REPO_CLONE_TOKEN}@github.com/${CIRRUS_REPO_FULL_NAME}.git $CIRRUS_WORKING_DIR + cd $CIRRUS_WORKING_DIR + git remote update origin + if [[ -n "$CIRRUS_PR" ]]; then # running for a PR + git fetch origin pull/$CIRRUS_PR/head:pull/$CIRRUS_PR + git checkout pull/$CIRRUS_PR + else + git reset --hard $CIRRUS_CHANGE_IN_REPO + fi + make install.tools + setup_script: &setup '$GOSRC/$SCRIPT_BASE/setup_environment.sh' main_script: &main '/usr/bin/time --verbose --output="$STATS_LOGFILE" $GOSRC/$SCRIPT_BASE/runner.sh' always: &runner_stats @@ -97,43 +116,7 @@ automation_task: TEST_FLAVOR: automation CTR_FQIN: ${FEDORA_CONTAINER_FQIN} TEST_ENVIRON: container - setup_script: *setup - main_script: *main - always: *runner_stats - - -# This task use to be called 'gating', however that name is being -# used downstream for release testing. Renamed this to avoid confusion. -# All it does is run basic golang formatting and commit validation checks. -smoke_task: - alias: 'smoke' - name: "Smoke Test" - skip: *branches_and_tags - container: &bigcontainer - image: ${CTR_FQIN} - # Leave some resources for smallcontainer - cpu: 6 - memory: 22 - env: - TEST_FLAVOR: 'smoke' - CTR_FQIN: "${FEDORA_CONTAINER_FQIN}" - TEST_ENVIRON: container - # This clone script is also used to initially populate gopath_cache (below) - clone_script: &full_clone | - cd / - rm -rf $CIRRUS_WORKING_DIR - mkdir -p $CIRRUS_WORKING_DIR - git clone --recursive --branch=$DEST_BRANCH https://x-access-token:${CIRRUS_REPO_CLONE_TOKEN}@github.com/${CIRRUS_REPO_FULL_NAME}.git $CIRRUS_WORKING_DIR - cd $CIRRUS_WORKING_DIR - git remote update origin - if [[ -n "$CIRRUS_PR" ]]; then # running for a PR - git fetch origin pull/$CIRRUS_PR/head:pull/$CIRRUS_PR - git checkout pull/$CIRRUS_PR - else - git reset --hard $CIRRUS_CHANGE_IN_REPO - fi - cd $CIRRUS_WORKING_DIR - make install.tools + clone_script: *full_clone setup_script: *setup main_script: *main always: *runner_stats @@ -211,11 +194,15 @@ build_task: validate_task: name: "Validate $DISTRO_NV Build" alias: validate - skip: *tags + # This task is primarily intended to catch human-errors early on, in a + # PR. Skip it for branch-push, branch-create, and tag-push to improve + # automation reliability/speed in those contexts. Any missed errors due + # to nonsequential PR merging practices, will be caught on a future PR, + # build or test task failures. 
+ skip: *branches_and_tags depends_on: - ext_svc_check - automation - - smoke - build # golangci-lint is a very, very hungry beast. gce_instance: &bigvm @@ -342,7 +329,7 @@ static_alt_build_task: - build # Community-maintained task, may fail on occasion. If so, uncomment # the next line and file an issue with details about the failure. - allow_failures: $CI == $CI + # allow_failures: $CI == $CI gce_instance: *bigvm env: <<: *stdenvars @@ -351,14 +338,13 @@ static_alt_build_task: ALT_NAME: 'Static build' # Do not use 'latest', fixed-version tag for runtime stability. CTR_FQIN: "docker.io/nixos/nix:2.3.6" - # This is critical, it helps to avoid a very lengthy process of - # statically building every dependency needed to build podman. - # Assuming the dependency and build description hasn't changed, - # this cache ensures only the static podman binary is built. - nix_cache: - folder: '/var/cache/nix' - # Cirrus will calculate/use sha of this output as the cache key - fingerprint_script: echo "${IMAGE_SUFFIX}" && cat nix/* + # Authentication token for pushing the build cache to cachix. + # This is critical, it helps to avoid a very lengthy process of + # statically building every dependency needed to build podman. + # Assuming the pinned nix dependencies in nix/nixpkgs.json have not + # changed, this cache will ensure that only the static podman binary is + # built. + CACHIX_AUTH_TOKEN: ENCRYPTED[df0d4d0a67474e8ea49cc503221dcb912b7e2ba45c8ec4bf2e5fd9c49a18ac21c24bacee59b5393355ed9e4358d2baef] setup_script: *setup main_script: *main always: *binary_artifacts @@ -646,7 +632,6 @@ success_task: depends_on: - ext_svc_check - automation - - smoke - build - validate - bindings @@ -639,6 +639,18 @@ install.libseccomp.sudo: cd ../../seccomp/libseccomp && git checkout --detach $(LIBSECCOMP_COMMIT) && ./autogen.sh && ./configure --prefix=/usr && make all && make install +.PHONY: completions +completions: podman podman-remote + # key = shell, value = completion filename + declare -A outfiles=([bash]=%s [zsh]=_%s [fish]=%s.fish);\ + for shell in $${!outfiles[*]}; do \ + for remote in "" "-remote"; do \ + podman="podman$$remote"; \ + outfile=$$(printf "completions/$$shell/$${outfiles[$$shell]}" $$podman); \ + ./bin/$$podman completion $$shell >| $$outfile; \ + done;\ + done + .PHONY: validate.completions validate.completions: SHELL:=/usr/bin/env bash # Set shell to bash for this target validate.completions: diff --git a/RELEASE_NOTES.md b/RELEASE_NOTES.md index 5a5d69193..82ecd741a 100644 --- a/RELEASE_NOTES.md +++ b/RELEASE_NOTES.md @@ -1,207 +1,151 @@ # Release Notes + ## 3.0.0 ### Features -- Add ability to set system wide options for slirp4netns -- Add --cidfile to container kill -- Add commas between mount options -- Add compose regression to ci -- Add containerenv information to /run/.containerenv -- Add default sysctls for pod infra containers -- Add --filter to podman system prune -- Adding json formatting to `--list-tags` option in `podman search` command. 
-- Add mask and unmask option to --security-opt -- Add 'MemUsageBytes' format option -- Add more information and examples on podman and pipes -- Add network filter for podman ps and pod ps -- Add Networks format placeholder to podman ps and pod ps -- Add pod filter for ps -- Add podman network create option for bridge mtu -- Add podman network create option for bridge vlan -- Add pre checkpoint -- Add Security information to podman info -- Add support for Gentoo file to package query -- Add support for network ids -- Add support for pacman package version query -- Add support for persistent volume claims in kube files -- Add support for --platform -- Add systempaths=unconfined option -- Add volume filters to system prune -- Add volume prune --filter support -- Allow podman push to push manifest lists -- Allow users to specify TMPDIR in containers.conf -- Always add the default gateway to the cni config file -- Drop default log-level from error to warn -- Enable short-name aliasing -- Generate kube on multiple containers -- Generate systemd: do not set `KillMode` -- Image sign using per user registries.d -- Implement pod-network-reload -- Include named volumes in container migration -- Initial implementation of renaming containers -- Initial implementation of volume plugins -- Network connect disconnect on non-running containers -- Not use local image create/add manifest -- Podman network label support -- Prepare support in kube play for other volume types than hostPath -- Remote copy -- Remove the ability to use [name:tag] in podman load command -- Remove varlink support from Podman -- Sign multi-arch images -- Support --network=default as if it was private -- Support Unix timestamps for `podman logs --since` +- Podman now features initial support for Docker Compose. +- Added the `podman rename` command, which allows containers to be renamed after they are created ([#1925](https://github.com/containers/podman/issues/1925)). +- The Podman remote client now supports the `podman copy` command. +- A new command, `podman network reload`, has been added. This command will re-configure the network of all running containers, and can be used to recreate firewall rules lost when the system firewall was reloaded (e.g. via `firewall-cmd --reload`). +- Podman networks now have IDs. They can be seen in `podman network ls` and can be used when removing and inspecting networks. Existing networks receive IDs automatically. +- Podman networks now also support labels. They can be added via the `--label` option to `network create`, and `podman network ls` can filter labels based on them. +- The `podman network create` command now supports setting bridge MTU and VLAN through the `--opt` option ([#8454](https://github.com/containers/podman/issues/8454)). +- The `podman container checkpoint` and `podman container restore` commands can now checkpoint and restore containers that include volumes. +- The `podman container checkpoint` command now supports the `--with-previous` and `--pre-checkpoint` options, and the `podman container restore` command now support the `--import-previous` option. These add support for two-step checkpointing with lowered dump times. +- The `podman push` command can now push manifest lists. Podman will first attempt to push as an image, then fall back to pushing as a manifest list if that fails. +- The `podman generate kube` command can now be run on multiple containers at once, and will generate a single pod containing all of them. 
+- The `podman kill` command now supports a `--cidfile` option to kill containers given a file containing the container's ID ([#8443](https://github.com/containers/podman/issues/8443)). +- The `podman volume create` command can now specify volume UID and GID as options with the `UID` and `GID` fields passed to the `--opt` option. +- Initial support has been added for Docker Volume Plugins. Podman can now define available plugins in `containers.conf` and use them to create volumes with `podman volume create --driver`. +- The `podman run` and `podman create` commands now support a new option, `--platform`, to specify the platform of the image to be used when creating the container. +- The `--security-opt` option to `podman run` and `podman create` now supports the `systempaths=unconfined` option to unrestrict access to all paths in the container, as well as `mask` and `unmask` options to allow more granular restriction of container paths. +- The `podman stats --format` command now supports a new format specifier, `MemUsageBytes`, which prints the raw bytes of memory consumed by a container without human-readable formatting [#8945](https://github.com/containers/podman/issues/8945). +- The `podman ps` command can now filter containers based on what pod they are joined to via the `pod` filter ([#8512](https://github.com/containers/podman/issues/8512)). +- The `podman pod ps` command can now filter pods based on what networks they are joined to via the `network` filter. +- The `podman pod ps` command can now print information on what networks a pod is joined to via the `.Networks` specifier to the `--format` option. +- The `podman system prune` command now supports filtering what containers, pods, images, and volumes will be pruned. +- The `podman volume prune` command now supports filtering what volumes will be pruned. +- The `podman system prune` command now includes information on space reclaimed ([#8658](https://github.com/containers/podman/issues/8658)). +- The `podman info` command will now properly print information about packages in use on Gentoo and Arch systems. +- The `containers.conf` file now contains an option for disabling creation of a new kernel keyring on container creation ([#8384](https://github.com/containers/podman/issues/8384)). +- The `podman image sign` command can now sign multi-arch images by producing a signature for each image in a given manifest list. +- The `podman image sign` command, when run as rootless, now supports per-user registry configuration files in `$HOME/.config/containers/registries.d`. +- Configuration options for `slirp4netns` can now be set system-wide via the `NetworkCmdOptions` configuration option in `containers.conf`. + ### Changes -- Add LogSize to container inspect -- Allow image errors to bubble up from lower level functions.
-- Change name of imageVolumes in container config JSON -- Cleanup CNI Networks on reboot -- Consolidate filter logic to pkg subdirectory -- Make `podman stats` slirp check more robust -- More /var/run -> /run -- Prefer read/write images over read/only images -- Refactor kube.ToSpecGen parameters to struct -- Rename AutocompletePortCommand func -- Repeat system pruning until there is nothing removed -- Switch references of /var/run -> /run -- Use HTTPProxy settings from containers.conf -- Use Libpod tmpdir for pause path -- Use Options as CRImportCheckpoint() argument -- Use Options as exportCheckpoint() argument -- Use PasswordCallback instead of Password for ssh -- Use abi PodPs implementation for libpod/pods/json endpoint -- Validate that the bridge option is supported -- archive: move stat-header handling into copy package -- libpod, conmon: change log level for rootless -- libpod: change function to accept ExecOptions -- libpod: handle single user mapped as root -- make podman play use ENVs from image -- pkg/copy: introduce a Copier -- podman events allow future time for --until -- podman.service should be an exec service not a notify service -- rewrite podman-cp -- rootless: add function to retrieve gid/uid mappings -- rootless: automatically split userns ranges -- runtime: set XDG_* env variables if missing -- shell completion for the network flag -- specgen: improve heuristic for /sys bind mount -- systemd: make rundir always accessible +- Shortname aliasing support has now been turned on by default. All Podman commands that must pull an image will, if a TTY is available, prompt the user about what image to pull. +- The `podman load` command no longer accepts a `NAME[:TAG]` argument. The presence of this argument broke CLI compatibility with Docker by making `docker load` commands unusable with Podman ([#7387](https://github.com/containers/podman/issues/7387)). +- The Go bindings for the HTTP API have been rewritten with a focus on limiting dependency footprint and improving extensibility. Read more [here](https://github.com/containers/podman/blob/v3.0/pkg/bindings/README.md). +- The legacy Varlink API has been completely removed from Podman. +- The default log level for Podman has been changed from Error to Warn. +- The `podman inspect` command has had the `LogPath` and `LogTag` fields moved into the `LogConfig` structure (from the root of the Inspect structure). The maximum size of the log file is also included. +- The `podman generate systemd` command no longer generates unit files using the deprecated `KillMode=none` option ([#8615](https://github.com/containers/podman/issues/8615)). +- The `podman stop` command now releases the container lock while waiting for it to stop - as such, commands like `podman ps` will no longer block until `podman stop` completes ([#8501](https://github.com/containers/podman/issues/8501)). +- Networks created with `podman network create --internal` no longer use the `dnsname` plugin. This configuration never functioned as expected. +- Error messages for the remote Podman client have been improved when it cannot connect to a Podman service. +- Error messages for `podman run` when an invalid SELinux is specified have been improved. +- Rootless Podman features improved support for containers with a single user mapped into the rootless user namespace. +- Pod infra containers now respect default sysctls specified in `containers.conf` allowing for advanced configuration of the namespaces they will share. 
+- SSH public key handling for remote Podman has been improved. + ### Bugfixes -- Close image rawSource when each loop ends -- Containers should not get inheritable caps by default -- Correct port range logic for port generation -- Correct which network commands can be run as rootless -- Disable CGv1 pod stats on net=host post -- Do not error on installing duplicate shutdown handler -- Do not ignore infra command from config files -- Do not mount sysfs as rootless in more cases -- Do not pull if image domain is localhost -- Do not use "true" after "syslog" in exit commands -- Do not validate the volume source path in specgen -- Don't accidently remove XDG_RUNTIME_DIR when reseting storage -- Ensure that `podman play kube` actually reports errors -- Ensure that user-specified HOSTNAME is honored -- Ensure we do not edit container config in Exec -- Exorcise Driver code from libpod/define -- Expose Height/Width fields to decoder -- Expose security attribute errors with their own messages -- Fix Wrong image tag is used when creating a container from an image with multiple tags -- Fix `podman images...` missing headers in table templates -- Fix build for mips architecture -- Fix build for mips architecture follow-up -- Fix custom mac address with a custom cni network -- Fix extra quotation mark in manpages. -- Fix missing options in volumes display while setting uid and gid -- Fix missing podman-container-rename man page link -- Fix network ls --filter invalid value flake -- Fix option names --subuidname and --subgidname -- Fix panic in libpod images exists endpoint -- Fix podman build --logfile -- Fix podman logs read partial log lines -- Fix problems reported by staticcheck -- Fix problems with network remove -- Fix shell completion for ps --filter ancestor -- Fix some nit -- Fix spelling mistakes -- Fix storage.conf to define driver in the VM -- Fix support for rpmbuild < 4.12.0. -- Fix: unpause not supported for CGv1 rootless -- Fxes /etc/hosts duplicated every time after container restarted in a pod -- Handle --rm when starting a container -- Handle podman exec capabilities correctly -- Honor the --layers flag -- Ignore containers.conf sysctls when sharing namespaces -- Improve error message when the the podman service is not enabled -- Make podman generate systemd --new flag parsing more robust -- Pass down EnableKeyring from containers.conf to conmon -- Properly handle --cap-add all when running with a --user flag -- Revert "Allow multiple --network flags for podman run/create" -- Revert e6fbc15f26b2a609936dfc11732037c70ee14cba -- Revert the custom cobra vendor -- Rework pruning to report reclaimed space -- Set NetNS mode instead of value -- The slirp4netns sandbox requires pivot_root -- close journald when reading -- container create: do not clear image name -- container stop: release lock before calling the runtime -- exec: honor --privileged -- fix: disable seccomp by default when privileged. 
-- image list: ignore bare manifest list -- network: disallow CNI networks with user namespaces -- oci: keep LC_ env variables to conmon -- oci: use /proc/self/fd/FD to open unix socket -- pass full NetworkMode to ParseNetworkNamespace -- play kube: fix args/command handling -- play kube: set entrypoint when interpreting Command -- podman build --force-rm defaults to true in code -- podman logs honor stderr correctly -- podman, exec: move conmon to the correct cgroup -- podman-remote fix sending tar content -- podman: drop checking valid rootless UID -- re-open container log files -- security: honor systempaths=unconfined for ro paths +- Fixed a bug where the `podman history --no-trunc` command would truncate the `Created By` field ([#9120](https://github.com/containers/podman/issues/9120)). +- Fixed a bug where root containers that did not explicitly specify a CNI network to join did not generate an entry for the network in use in the `Networks` field of the output of `podman inspect` ([#6618](https://github.com/containers/podman/issues/6618)). +- Fixed a bug where, under some circumstances, container working directories specified by the image (via the `WORKDIR` instruction) but not present in the image, would not be created ([#9040](https://github.com/containers/podman/issues/9040)). +- Fixed a bug where the `podman generate systemd` command would generate invalid unit files if the container was created using a command line that included doubled braces (`{{` and `}}`), e.g. `--log-opt-tag={{.Name}}` ([#9034](https://github.com/containers/podman/issues/9034)). +- Fixed a bug where the `podman generate systemd --new` command could generate unit files including invalid Podman commands if the container was created using merged short options (e.g. `podman run -dt`) ([#8847](https://github.com/containers/podman/issues/8847)). +- Fixed a bug where rootless containers joining CNI networks could not set a static IP address ([#7842](https://github.com/containers/podman/issues/7842)). +- Fixed a bug where rootless containers joining CNI networks could not set network aliases ([#8567](https://github.com/containers/podman/issues/8567)). +- Fixed a bug where the remote client could, under some circumstances, not include the `Containerfile` when sending build context to the server ([#8374](https://github.com/containers/podman/issues/8374)). +- Fixed a bug where rootless Podman did not mount `/sys` as a new `sysfs` in some circumstances where it was acceptable. +- Fixed a bug where rootless containers that both joined a user namespace and a CNI network would cause a segfault. These options are incompatible and now return an error. +- Fixed a bug where the `podman play kube` command did not properly handle `CMD` and `ARGS` from images ([#8803](https://github.com/containers/podman/issues/8803)). +- Fixed a bug where the `podman play kube` command did not properly handle environment variables from images ([#8608](https://github.com/containers/podman/issues/8608)). +- Fixed a bug where the `podman play kube` command did not properly print errors that occurred when starting containers. +- Fixed a bug where the `podman play kube` command errored when `hostNetwork` was used ([#8790](https://github.com/containers/podman/issues/8790)). +- Fixed a bug where the `podman play kube` command would always pull images when the `:latest` tag was specified, even if the image was available locally ([#7838](https://github.com/containers/podman/issues/7838)).
+- Fixed a bug where containers in a pod would create a duplicate entry in the pod's shared `/etc/hosts` file every time the container restarted ([#8921](https://github.com/containers/podman/issues/8921)). +- Fixed a bug where the `podman search --list-tags` command did not support the `--format` option ([#8740](https://github.com/containers/podman/issues/8740)). +- Fixed a bug where the `http_proxy` option in `containers.conf` was not being respected, and instead was set unconditionally to true ([#8843](https://github.com/containers/podman/issues/8843)). +- Fixed a bug where rootless Podman could, on systems with a recent Conmon and users with a long username, fail to attach to containers ([#8798](https://github.com/containers/podman/issues/8798)). +- Fixed a bug where the `podman images` command would break and fail to display any images if an empty manifest list was present in storage ([#8931](https://github.com/containers/podman/issues/8931)). +- Fixed a bug where locale environment variables were not properly passed on to Conmon. +- Fixed a bug where Podman would not build on the MIPS architecture ([#8782](https://github.com/containers/podman/issues/8782)). +- Fixed a bug where rootless Podman could fail to properly configure user namespaces for rootless containers when the user specified a `--uidmap` option that included a mapping beginning with UID `0`. +- Fixed a bug where the `podman logs` command using the `k8s-file` backend did not properly handle partial log lines with a length of 1 ([#8879](https://github.com/containers/podman/issues/8879)). +- Fixed a bug where the `podman logs` command with the `--follow` option did not properly handle log rotation ([#8733](https://github.com/containers/podman/issues/8733)). +- Fixed a bug where user-specified `HOSTNAME` environment variables were overwritten by Podman ([#8886](https://github.com/containers/podman/issues/8886)). +- Fixed a bug where Podman would apply default sysctls from `containers.conf` in too many situations (e.g. applying network sysctls when the container shared its network with a pod). +- Fixed a bug where Podman did not properly handle cases where a secondary image store was in use and an image was present in both the secondary and primary stores ([#8176](https://github.com/containers/podman/issues/8176)). +- Fixed a bug where systemd-managed rootless Podman containers where the user in the container was not root could fail as the container's PID file was not accessible to systemd on the host ([#8506](https://github.com/containers/podman/issues/8506)). +- Fixed a bug where the `--privileged` option to `podman run` and `podman create` would, under some circumstances, not disable Seccomp ([#8849](https://github.com/containers/podman/issues/8849)). +- Fixed a bug where the `podman exec` command did not properly add capabilities when the container or exec session was run with `--privileged`. +- Fixed a bug where rootless Podman would use the `--enable-sandbox` option to `slirp4netns` unconditionally, even when `pivot_root` was disabled, rendering `slirp4netns` unusable when `pivot_root` was disabled ([#8846](https://github.com/containers/podman/issues/8846)). +- Fixed a bug where `podman build --logfile` did not actually write the build's log to the logfile. +- Fixed a bug where the `podman system service` command did not close STDIN, and could display user-interactive prompts ([#8700](https://github.com/containers/podman/issues/8700)).
+- Fixed a bug where the `podman system reset` command could, under some circumstances, remove all the contents of the `XDG_RUNTIME_DIR` directory ([#8680](https://github.com/containers/podman/issues/8680)). +- Fixed a bug where the `podman network create` command created CNI configurations that did not include a default gateway ([#8748](https://github.com/containers/podman/issues/8748)). +- Fixed a bug where the `podman.service` systemd unit provided by default used the wrong service type, and would cause systemd to not correctly register the service as started ([#8751](https://github.com/containers/podman/issues/8751)). +- Fixed a bug where, if the `TMPDIR` environment variable was set for the container engine in `containers.conf`, it was being ignored. +- Fixed a bug where the `podman events` command did not properly handle future times given to the `--until` option ([#8694](https://github.com/containers/podman/issues/8694)). +- Fixed a bug where the `podman logs` command wrote container `STDERR` logs to `STDOUT` instead of `STDERR` ([#8683](https://github.com/containers/podman/issues/8683)). +- Fixed a bug where containers created from an image with multiple tags would report that they were created from the wrong tag ([#8547](https://github.com/containers/podman/issues/8547)). +- Fixed a bug where container capabilities were not set properly when the `--cap-add=all` and `--user` options to `podman create` and `podman run` were combined. +- Fixed a bug where the `--layers` option to `podman build` was nonfunctional ([#8643](https://github.com/containers/podman/issues/8643)). +- Fixed a bug where the `podman system prune` command did not act recursively, and thus would leave images, containers, pods, and volumes present that would be removed by a subsequent call to `podman system prune` ([#7990](https://github.com/containers/podman/issues/7990)). +- Fixed a bug where the `--publish` option to `podman run` and `podman create` did not properly handle ports specified as a range of ports with no host port specified ([#8650](https://github.com/containers/podman/issues/8650)). +- Fixed a bug where `--format` did not support JSON output for individual fields ([#8444](https://github.com/containers/podman/issues/8444)). +- Fixed a bug where the `podman stats` command would fail when run on root containers using the `slirp4netns` network mode ([#7883](https://github.com/containers/podman/issues/7883)). +- Fixed a bug where the Podman remote client would ask for a password even if the server's SSH daemon did not support password authentication ([#8498](https://github.com/containers/podman/issues/8498)). + ### API -- Add API for communicating with Docker volume plugins -- Change bindings to stop two API calls for ping -- Close the stdin/tty when using podman as a restAPI. 
-- Compat api containers/json add support for filters -- Container rename bindings -- Do not pass name argument to Load API -- Docker compat API - /images/search returns wrong structure (#7857) -- Docker compat API - containers create ignores the name -- Fix some network compat api problems -- Jira RUN-1106 Container handlers updates -- Jira RUN-1106 Image handlers updates -- Jira RUN-1106 Network handlers updates -- Jira RUN-1106 System handlers updates -- Jira RUN-1106 Volumes handlers updates -- Makefile: add target to generate bindings -- More docker compat API fixes -- Podman image bindings for 3.0 -- REST API v2 - ping - fix typo in header -- REST API v2 - ping - remove newline from response to improve Docker compatibility -- Reduce general binding binary size -- Restore compatible API for prune endpoints -- compat create should use bindings -- hack/podman-socat captures the API stream -- libpod API: pull: fix channel race -- misc bindings to podman v3 -- pkg/copy: add parsing API -- podman v3 container bindings -- podman v3 pod bindings +- The Compat API for Containers now supports the Rename and Copy APIs. +- Fixed a bug where the Compat Prune APIs (for volumes, containers, and images) did not return the amount of space reclaimed in their responses. +- Fixed a bug where the Compat and Libpod Exec APIs for Containers would drop errors that occurred prior to the exec session successfully starting (e.g. a "no such file" error if an invalid executable was passed) ([#8281](https://github.com/containers/podman/issues/8281)) +- Fixed a bug where the Volumes field in the Compat Create API for Containers was being ignored ([#8649](https://github.com/containers/podman/issues/8649)). +- Fixed a bug where the NetworkMode field in the Compat Create API for Containers was not handling some values, e.g. `container:`, correctly. +- Fixed a bug where the Compat Create API for Containers did not set container name properly. +- Fixed a bug where containers created using the Compat Create API unconditionally used Kubernetes file logging (the default specified in `containers.conf` is now used). +- Fixed a bug where the Compat Inspect API for Containers could include container states not recognized by Docker. +- Fixed a bug where Podman did not properly clean up after calls to the Events API when the `journald` backend was in use, resulting in a leak of file descriptors ([#8864](https://github.com/containers/podman/issues/8864)). +- Fixed a bug where the Libpod Pull endpoint for Images could fail with an `index out of range` error under certain circumstances ([#8870](https://github.com/containers/podman/issues/8870)). +- Fixed a bug where the Libpod Exists endpoint for Images could panic. +- Fixed a bug where the Compat List API for Containers did not support all filters ([#8860](https://github.com/containers/podman/issues/8860)). +- Fixed a bug where the Compat and Libpod Resize APIs for Containers ignored the height and width parameters ([#7102](https://github.com/containers/podman/issues/7102)). +- Fixed a bug where the Compat Search API for Images returned an incorrectly-formatted JSON response ([#8758](https://github.com/containers/podman/pull/8758)). +- Fixed a bug where the Compat Load API for Images did not properly clean up temporary files. +- Fixed a bug where the Compat Create API for Networks could panic when an empty IPAM configuration was specified. +- Fixed a bug where the Compat Inspect and List APIs for Networks did not include Scope. 
+ +### Misc +- Updated Buildah to v1.19.2 +- Updated the containers/storage library to v1.24.5 +- Updated the containers/common library to v0.33.1 + +## v2.2.1 +### Changes +- Due to a conflict with a previously-removed field, we were forced to modify the way image volumes (mounting images into containers using `--mount type=image`) were handled in the database. As a result, containers created in Podman 2.2.0 with image volumes will not have them in v2.2.1, and these containers will need to be re-created. + +### Bugfixes +- Fixed a bug where rootless Podman would, on systems without the `XDG_RUNTIME_DIR` environment variable defined, use an incorrect path for the PID file of the Podman pause process, causing Podman to fail to start ([#8539](https://github.com/containers/podman/issues/8539)). +- Fixed a bug where containers created using Podman v1.7 and earlier were unusable in Podman due to JSON decode errors ([#8613](https://github.com/containers/podman/issues/8613)). +- Fixed a bug where Podman could retrieve invalid cgroup paths, instead of erroring, for containers that were not running. +- Fixed a bug where the `podman system reset` command would print a warning about a duplicate shutdown handler being registered. +- Fixed a bug where rootless Podman would attempt to mount `sysfs` in circumstances where it was not allowed; some OCI runtimes (notably `crun`) would fall back to alternatives and not fail, but others (notably `runc`) would fail to run containers. +- Fixed a bug where the `podman run` and `podman create` commands would fail to create containers from untagged images ([#8558](https://github.com/containers/podman/issues/8558)). +- Fixed a bug where remote Podman would prompt for a password even when the server did not support password authentication ([#8498](https://github.com/containers/podman/issues/8498)). +- Fixed a bug where the `podman exec` command did not move the Conmon process for the exec session into the correct cgroup. +- Fixed a bug where shell completion for the `ancestor` option to `podman ps --filter` did not work correctly. +- Fixed a bug where detached containers would not properly clean themselves up (or remove themselves if `--rm` was set) if the Podman command that created them was invoked with `--log-level=debug`. + +### API +- Fixed a bug where the Compat Create endpoint for Containers did not properly handle the `Binds` and `Mounts` parameters in `HostConfig`. +- Fixed a bug where the Compat Create endpoint for Containers ignored the `Name` query parameter. +- Fixed a bug where the Compat Create endpoint for Containers did not properly handle the "default" value for `NetworkMode` (this value is used extensively by `docker-compose`) ([#8544](https://github.com/containers/podman/issues/8544)). +- Fixed a bug where the Compat Build endpoint for Images would sometimes incorrectly use the `target` query parameter as the image's tag. 
+ ### Misc -- Bump github.com/containernetworking/plugins from 0.8.7 to 0.9.0 -- Bump github.com/containers/common from 0.30.0 to 0.31.1 -- Bump github.com/containers/image/v5 from 5.8.1 to 5.9.0 -- Bump github.com/containers/storage from 1.24.1 to 1.24.5 -- Bump github.com/cri-o/ocicni to latest master -- Bump github.com/google/uuid from 1.1.2 to 1.1.5 -- Bump github.com/onsi/gomega from 1.10.3 to 1.10.4 -- Bump github.com/opencontainers/selinux from 1.6.0 to 1.8.0 -- Bump github.com/stretchr/testify from 1.6.1 to 1.7.0 -- Bump k8s.io/apimachinery from 0.19.4 to 0.20.2 -- Bump master to v3.0.0-dev -- Bump to containers/buildah 1.9.2 -- Bump version in README to v2.2.0 -- vendor containers/psgo@v1.5.2 +- Podman v2.2.0 vendored a non-released, custom version of the `github.com/spf13/cobra` package; this has been reverted to the latest upstream release to aid in packaging. +- Updated the containers/image library to v5.9.0 ## 2.2.0 ### Features @@ -344,7 +288,7 @@ - Fixed a bug where a client disconnecting from the Libpod or Compat events endpoints could result in the server using 100% CPU ([#7946](https://github.com/containers/podman/issues/7946)). - Fixed a bug where the "no such image" error message sent by the Compat Inspect endpoint for Images returned a 404 status code with an error that was improperly formatted for Docker compatibility. - Fixed a bug where the Compat Create endpoint for networks did not properly set a default for the `driver` parameter if it was not provided by the client. -- Fixed a bug where the Compat Inspect endpoint for images did not populate the `RootFS` field of the response. +- Fixed a bug where the Compat Inspect endpoint for images did not populate the `RootFS`, `VirtualSize`, `ParentId`, `Architecture`, `Os`, and `OsVersion` fields of the response. - Fixed a bug where the Compat Inspect endpoint for images would omit the `ParentId` field if the image had no parent, and the `Created` field if the image did not have a creation time. - Fixed a bug where the Compat Remove endpoint for Networks did not support the `Force` query parameter. diff --git a/build_osx.md b/build_osx.md new file mode 100644 index 000000000..59e1797a6 --- /dev/null +++ b/build_osx.md @@ -0,0 +1,55 @@ +# Building the Podman client on macOS + +The following describes the process for building the Podman client on macOS. + +## Install brew +Podman requires brew -- a package manager for macOS. This will allow additional packages to be installed that are +needed by Podman. See the [brew project page](https://brew.sh/) for installation instructions. + +## Install build dependencies +Podman requires some software from brew to be able to build. This can be done using brew from a macOS terminal: + +``` +$ brew install go go-md2man +``` + +## Obtain Podman source code + +You can obtain the latest source code for Podman from its GitHub repository. + +``` +$ git clone http://github.com/containers/podman go/src/github.com/containers/podman +``` + +## Build client +After completing the preparatory steps of obtaining the Podman source code and installing its dependencies, the client +can now be built.
+ +``` +$ cd go/src/github.com/containers/podman +$ make podman-remote-darwin +$ mv bin/podman-remote-darwin bin/podman +``` + +The binary will be located in bin/ +``` +$ ls -l bin/ +``` + +If you would like to build the docs associated with Podman on macOS: +``` +$ make install-podman-remote-darwin-docs +$ ls docs/build/remote/darwin +``` + +To install and view these manpages: + +``` +$ cp -a docs/build/remote/darwin/* /usr/share/man/man1 +$ man podman +``` + +## Using the client + +To learn how to use the Podman client, refer its +[tutorial](https://github.com/containers/podman/blob/master/docs/tutorials/remote_client.md). diff --git a/cmd/podman/common/create_opts.go b/cmd/podman/common/create_opts.go index a4da8da9e..d86a6d364 100644 --- a/cmd/podman/common/create_opts.go +++ b/cmd/podman/common/create_opts.go @@ -3,6 +3,7 @@ package common import ( "fmt" "net" + "path/filepath" "strconv" "strings" @@ -383,8 +384,29 @@ func ContainerCreateToContainerCLIOpts(cc handlers.CreateContainerConfig, cgroup } // volumes - if volumes := cc.HostConfig.Binds; len(volumes) > 0 { - cliOpts.Volume = volumes + volDestinations := make(map[string]bool) + for _, vol := range cc.HostConfig.Binds { + cliOpts.Volume = append(cliOpts.Volume, vol) + // Extract the destination so we don't add duplicate mounts in + // the volumes phase. + splitVol := strings.SplitN(vol, ":", 3) + switch len(splitVol) { + case 1: + volDestinations[vol] = true + default: + volDestinations[splitVol[1]] = true + } + } + // Anonymous volumes are added differently from other volumes, in their + // own special field, for reasons known only to Docker. Still use the + // format of `-v` so we can just append them in there. + // Unfortunately, these may be duplicates of existing mounts in Binds. + // So... We need to catch that. + for vol := range cc.Volumes { + if _, ok := volDestinations[filepath.Clean(vol)]; ok { + continue + } + cliOpts.Volume = append(cliOpts.Volume, vol) } if len(cc.HostConfig.BlkioWeightDevice) > 0 { devices := make([]string, 0, len(cc.HostConfig.BlkioWeightDevice)) diff --git a/cmd/podman/common/volumes.go b/cmd/podman/common/volumes.go index a6e6faeca..2a598d7a5 100644 --- a/cmd/podman/common/volumes.go +++ b/cmd/podman/common/volumes.go @@ -353,6 +353,10 @@ func getBindMount(args []string) (spec.Mount, error) { default: return newMount, errors.Wrapf(util.ErrBadMntOption, "%s mount option must be 'private' or 'shared'", kv[0]) } + case "consistency": + // Often used on MACs and mistakenly on Linux platforms. + // Since Docker ignores this option so shall we. + continue default: return newMount, errors.Wrapf(util.ErrBadMntOption, kv[0]) } @@ -437,6 +441,10 @@ func getTmpfsMount(args []string) (spec.Mount, error) { } newMount.Destination = filepath.Clean(kv[1]) setDest = true + case "consistency": + // Often used on MACs and mistakenly on Linux platforms. + // Since Docker ignores this option so shall we. + continue default: return newMount, errors.Wrapf(util.ErrBadMntOption, kv[0]) } @@ -534,6 +542,10 @@ func getNamedVolume(args []string) (*specgen.NamedVolume, error) { } newVolume.Dest = filepath.Clean(kv[1]) setDest = true + case "consistency": + // Often used on MACs and mistakenly on Linux platforms. + // Since Docker ignores this option so shall we. 
+ continue default: return nil, errors.Wrapf(util.ErrBadMntOption, kv[0]) } @@ -581,6 +593,10 @@ func getImageVolume(args []string) (*specgen.ImageVolume, error) { default: return nil, errors.Wrapf(util.ErrBadMntOption, "invalid rw value %q", kv[1]) } + case "consistency": + // Often used on MACs and mistakenly on Linux platforms. + // Since Docker ignores this option so shall we. + continue default: return nil, errors.Wrapf(util.ErrBadMntOption, kv[0]) } diff --git a/cmd/podman/containers/kill.go b/cmd/podman/containers/kill.go index 28040e08a..36e3e5f59 100644 --- a/cmd/podman/containers/kill.go +++ b/cmd/podman/containers/kill.go @@ -2,8 +2,9 @@ package containers import ( "context" - "errors" "fmt" + "io/ioutil" + "strings" "github.com/containers/common/pkg/completion" "github.com/containers/podman/v2/cmd/podman/common" @@ -12,6 +13,7 @@ import ( "github.com/containers/podman/v2/cmd/podman/validate" "github.com/containers/podman/v2/pkg/domain/entities" "github.com/containers/podman/v2/pkg/signal" + "github.com/pkg/errors" "github.com/spf13/cobra" ) @@ -59,7 +61,7 @@ func killFlags(cmd *cobra.Command) { flags.StringVarP(&killOptions.Signal, signalFlagName, "s", "KILL", "Signal to send to the container") _ = cmd.RegisterFlagCompletionFunc(signalFlagName, common.AutocompleteStopSignal) cidfileFlagName := "cidfile" - flags.StringArrayVar(&killOptions.CIDFiles, cidfileFlagName, []string{}, "Read the container ID from the file") + flags.StringArrayVar(&cidFiles, cidfileFlagName, []string{}, "Read the container ID from the file") _ = cmd.RegisterFlagCompletionFunc(cidfileFlagName, completion.AutocompleteDefault) } @@ -94,6 +96,15 @@ func kill(_ *cobra.Command, args []string) error { if sig < 1 || sig > 64 { return errors.New("valid signals are 1 through 64") } + for _, cidFile := range cidFiles { + content, err := ioutil.ReadFile(string(cidFile)) + if err != nil { + return errors.Wrap(err, "error reading CIDFile") + } + id := strings.Split(string(content), "\n")[0] + args = append(args, id) + } + responses, err := registry.ContainerEngine().ContainerKill(context.Background(), args, killOptions) if err != nil { return err diff --git a/cmd/podman/containers/ps.go b/cmd/podman/containers/ps.go index d23771fc5..31f44d92f 100644 --- a/cmd/podman/containers/ps.go +++ b/cmd/podman/containers/ps.go @@ -78,7 +78,7 @@ func listFlagSet(cmd *cobra.Command) { flags := cmd.Flags() flags.BoolVarP(&listOpts.All, "all", "a", false, "Show all the containers, default is only running containers") - flags.BoolVar(&listOpts.Storage, "external", false, "Show containers in storage not controlled by Podman") + flags.BoolVar(&listOpts.External, "external", false, "Show containers in storage not controlled by Podman") filterFlagName := "filter" flags.StringSliceVarP(&filters, filterFlagName, "f", []string{}, "Filter output based on conditions given") @@ -132,10 +132,10 @@ func checkFlags(c *cobra.Command) error { } cfg := registry.PodmanConfig() if cfg.Engine.Namespace != "" { - if c.Flag("storage").Changed && listOpts.Storage { - return errors.New("--namespace and --storage flags can not both be set") + if c.Flag("storage").Changed && listOpts.External { + return errors.New("--namespace and --external flags can not both be set") } - listOpts.Storage = false + listOpts.External = false } return nil diff --git a/cmd/podman/containers/rm.go b/cmd/podman/containers/rm.go index ea616b6e5..884ad05f4 100644 --- a/cmd/podman/containers/rm.go +++ b/cmd/podman/containers/rm.go @@ -140,6 +140,10 @@ func 
removeContainers(namesOrIDs []string, rmOptions entities.RmOptions, setExit } func setExitCode(err error) { + // If error is set to no such container, do not reset + if registry.GetExitCode() == 1 { + return + } cause := errors.Cause(err) switch { case cause == define.ErrNoSuchCtr: diff --git a/cmd/podman/containers/stop.go b/cmd/podman/containers/stop.go index 3a4211357..7338c8d98 100644 --- a/cmd/podman/containers/stop.go +++ b/cmd/podman/containers/stop.go @@ -3,6 +3,8 @@ package containers import ( "context" "fmt" + "io/ioutil" + "strings" "github.com/containers/common/pkg/completion" "github.com/containers/podman/v2/cmd/podman/common" @@ -10,6 +12,7 @@ import ( "github.com/containers/podman/v2/cmd/podman/utils" "github.com/containers/podman/v2/cmd/podman/validate" "github.com/containers/podman/v2/pkg/domain/entities" + "github.com/pkg/errors" "github.com/spf13/cobra" ) @@ -58,7 +61,7 @@ func stopFlags(cmd *cobra.Command) { flags.BoolVarP(&stopOptions.Ignore, "ignore", "i", false, "Ignore errors when a specified container is missing") cidfileFlagName := "cidfile" - flags.StringArrayVarP(&stopOptions.CIDFiles, cidfileFlagName, "", nil, "Read the container ID from the file") + flags.StringArrayVar(&cidFiles, cidfileFlagName, nil, "Read the container ID from the file") _ = cmd.RegisterFlagCompletionFunc(cidfileFlagName, completion.AutocompleteDefault) timeFlagName := "time" @@ -97,6 +100,15 @@ func stop(cmd *cobra.Command, args []string) error { stopOptions.Timeout = &stopTimeout } + for _, cidFile := range cidFiles { + content, err := ioutil.ReadFile(string(cidFile)) + if err != nil { + return errors.Wrap(err, "error reading CIDFile") + } + id := strings.Split(string(content), "\n")[0] + args = append(args, id) + } + responses, err := registry.ContainerEngine().ContainerStop(context.Background(), args, stopOptions) if err != nil { return err diff --git a/cmd/podman/containers/wait.go b/cmd/podman/containers/wait.go index 2bbfbccc9..14d660678 100644 --- a/cmd/podman/containers/wait.go +++ b/cmd/podman/containers/wait.go @@ -50,7 +50,7 @@ func waitFlags(cmd *cobra.Command) { flags := cmd.Flags() intervalFlagName := "interval" - flags.StringVarP(&waitInterval, intervalFlagName, "i", "250ns", "Time Interval to wait before polling for completion") + flags.StringVarP(&waitInterval, intervalFlagName, "i", "250ms", "Time Interval to wait before polling for completion") _ = cmd.RegisterFlagCompletionFunc(intervalFlagName, completion.AutocompleteNone) conditionFlagName := "condition" diff --git a/cmd/podman/images/build.go b/cmd/podman/images/build.go index 4219e325b..1f06dace9 100644 --- a/cmd/podman/images/build.go +++ b/cmd/podman/images/build.go @@ -106,7 +106,9 @@ func buildFlags(cmd *cobra.Command) { logrus.Errorf("unable to set --pull to true: %v", err) } flag.DefValue = "true" + flag.Usage = "Always attempt to pull the image (errors are fatal)" flags.AddFlagSet(&budFlags) + // Add the completion functions budCompletions := buildahCLI.GetBudFlagsCompletions() completion.CompleteCommandFlags(cmd, budCompletions) diff --git a/cmd/podman/images/history.go b/cmd/podman/images/history.go index 964c7a975..af40dd73a 100644 --- a/cmd/podman/images/history.go +++ b/cmd/podman/images/history.go @@ -162,7 +162,7 @@ func (h historyReporter) Size() string { } func (h historyReporter) CreatedBy() string { - if len(h.ImageHistoryLayer.CreatedBy) > 45 { + if !opts.noTrunc && len(h.ImageHistoryLayer.CreatedBy) > 45 { return h.ImageHistoryLayer.CreatedBy[:45-3] + "..." 
} return h.ImageHistoryLayer.CreatedBy diff --git a/cmd/podman/images/push.go b/cmd/podman/images/push.go index d53a9c066..eccf93e57 100644 --- a/cmd/podman/images/push.go +++ b/cmd/podman/images/push.go @@ -98,7 +98,7 @@ func pushFlags(cmd *cobra.Command) { _ = cmd.RegisterFlagCompletionFunc(digestfileFlagName, completion.AutocompleteDefault) formatFlagName := "format" - flags.StringVarP(&pushOptions.Format, formatFlagName, "f", "", "Manifest type (oci, v2s1, or v2s2) to use when pushing an image using the 'dir' transport (default is manifest type of source)") + flags.StringVarP(&pushOptions.Format, formatFlagName, "f", "", "Manifest type (oci, v2s2, or v2s1) to use when pushing an image using the 'dir' transport (default is manifest type of source)") _ = cmd.RegisterFlagCompletionFunc(formatFlagName, common.AutocompleteManifestFormat) flags.BoolVarP(&pushOptions.Quiet, "quiet", "q", false, "Suppress output information when pushing images") @@ -114,7 +114,10 @@ func pushFlags(cmd *cobra.Command) { if registry.IsRemote() { _ = flags.MarkHidden("cert-dir") _ = flags.MarkHidden("compress") + _ = flags.MarkHidden("digestfile") _ = flags.MarkHidden("quiet") + _ = flags.MarkHidden("remove-signatures") + _ = flags.MarkHidden("sign-by") } _ = flags.MarkHidden("signature-policy") } diff --git a/cmd/podman/pods/create.go b/cmd/podman/pods/create.go index d997ea344..23fb323a0 100644 --- a/cmd/podman/pods/create.go +++ b/cmd/podman/pods/create.go @@ -171,33 +171,6 @@ func create(cmd *cobra.Command, args []string) error { if err != nil { return err } - createOptions.Net.Network = specgen.Namespace{} - if cmd.Flag("network").Changed { - netInput, err := cmd.Flags().GetString("network") - if err != nil { - return err - } - parts := strings.SplitN(netInput, ":", 2) - - n := specgen.Namespace{} - switch { - case netInput == "bridge": - n.NSMode = specgen.Bridge - case netInput == "host": - n.NSMode = specgen.Host - case netInput == "slirp4netns", strings.HasPrefix(netInput, "slirp4netns:"): - n.NSMode = specgen.Slirp - if len(parts) > 1 { - createOptions.Net.NetworkOptions = make(map[string][]string) - createOptions.Net.NetworkOptions[parts[0]] = strings.Split(parts[1], ",") - } - default: - // Container and NS mode are presently unsupported - n.NSMode = specgen.Bridge - createOptions.Net.CNINetworks = strings.Split(netInput, ",") - } - createOptions.Net.Network = n - } if len(createOptions.Net.PublishPorts) > 0 { if !createOptions.Infra { return errors.Errorf("you must have an infra container to publish port bindings to the host") diff --git a/completions/Readme.md b/completions/Readme.md index 5c9d16f3c..132a38bbf 100644 --- a/completions/Readme.md +++ b/completions/Readme.md @@ -2,6 +2,6 @@ Podman offers shell completion scripts for bash, zsh and fish. The completion scripts are available for both `podman` and `podman-remote`. -The shell completion scripts are generated by `make completion`, do not edit these files directly. To install them you can run `sudo make install.completions`. +The shell completion scripts are generated by `make completions`; do not edit these files directly. To install them you can run `sudo make install.completions`. 
For information about these scripts see [`man podman-completion`](../docs/source/markdown/podman-completion.1.md) diff --git a/contrib/cirrus/lib.sh b/contrib/cirrus/lib.sh index bc9a95310..451a267b3 100644 --- a/contrib/cirrus/lib.sh +++ b/contrib/cirrus/lib.sh @@ -80,10 +80,19 @@ CIRRUS_CI="${CIRRUS_CI:-false}" DEST_BRANCH="${DEST_BRANCH:-master}" CONTINUOUS_INTEGRATION="${CONTINUOUS_INTEGRATION:-false}" CIRRUS_REPO_NAME=${CIRRUS_REPO_NAME:-podman} -# N/B: CIRRUS_BASE_SHA is empty on branch and tag push. -CIRRUS_BASE_SHA=${CIRRUS_BASE_SHA:-${CIRRUS_LAST_GREEN_CHANGE:-YOU_FOUND_A_BUG}} -CIRRUS_BUILD_ID=${CIRRUS_BUILD_ID:-$RANDOM$(date +%s)} # must be short and unique - +# Cirrus only sets $CIRRUS_BASE_SHA properly for PRs, but $EPOCH_TEST_COMMIT +# needs to be set from this value in order for `make validate` to run properly. +# When running get_ci_vm.sh, most $CIRRUS_xyz variables are empty. Attempt +# to accommodate both branch and get_ci_vm.sh testing by discovering the base +# branch SHA value. +# shellcheck disable=SC2154 +if [[ -z "$CIRRUS_BASE_SHA" ]] && [[ -z "$CIRRUS_TAG" ]] +then # Operating on a branch, or under `get_ci_vm.sh` + CIRRUS_BASE_SHA=$(git rev-parse ${UPSTREAM_REMOTE:-origin}/$DEST_BRANCH) +elif [[ -z "$CIRRUS_BASE_SHA" ]] +then # Operating on a tag + CIRRUS_BASE_SHA=$(git rev-parse HEAD) +fi # The starting place for linting and code validation EPOCH_TEST_COMMIT="$CIRRUS_BASE_SHA" diff --git a/contrib/cirrus/pr-should-include-tests b/contrib/cirrus/pr-should-include-tests index caf27cf83..a3b4847a7 100755 --- a/contrib/cirrus/pr-should-include-tests +++ b/contrib/cirrus/pr-should-include-tests @@ -39,6 +39,7 @@ filtered_changes=$(git diff --name-status $base $head | egrep -v '^contrib/' | egrep -v '^docs/' | egrep -v '^hack/' | + egrep -v '^nix/' | egrep -v '^vendor/' | egrep -v '^version/') if [[ -z "$filtered_changes" ]]; then diff --git a/contrib/cirrus/required_host_ports.txt b/contrib/cirrus/required_host_ports.txt index 9248e497a..5f066e059 100644 --- a/contrib/cirrus/required_host_ports.txt +++ b/contrib/cirrus/required_host_ports.txt @@ -2,3 +2,4 @@ github.com 22 docker.io 443 quay.io 443 registry.fedoraproject.org 443 +podman.cachix.org 443 diff --git a/contrib/cirrus/runner.sh b/contrib/cirrus/runner.sh index 50bc1102f..ccbdb63b6 100755 --- a/contrib/cirrus/runner.sh +++ b/contrib/cirrus/runner.sh @@ -23,22 +23,6 @@ function _run_ext_svc() { $SCRIPT_BASE/ext_svc_check.sh } -function _run_smoke() { - make gofmt - - # There is little value to validating commits after tag-push - # and it's very difficult to automatically determine a starting commit. - # $CIRRUS_TAG is only non-empty when executing due to a tag-push - # shellcheck disable=SC2154 - if [[ -z "$CIRRUS_TAG" ]]; then - # If PR consists of multiple commits, test that each compiles cleanly - make .gitvalidation - - # PRs should include some way to test.
- $SCRIPT_BASE/pr-should-include-tests - fi -} - function _run_automation() { $SCRIPT_BASE/cirrus_yaml_test.py @@ -51,11 +35,14 @@ function _run_automation() { } function _run_validate() { - # Confirm compile via prior task + cache - bin/podman --version - bin/podman-remote --version + # git-validation tool fails if $EPOCH_TEST_COMMIT is empty + # shellcheck disable=SC2154 + if [[ -n "$EPOCH_TEST_COMMIT" ]]; then + make validate + else + warn "Skipping git-validation since \$EPOCH_TEST_COMMIT is empty" + fi - make validate # Some items require a build } function _run_unit() { @@ -241,15 +228,14 @@ function _run_altbuild() { req_env_vars CTR_FQIN [[ "$UID" -eq 0 ]] || \ die "Static build must execute nixos container as root on host" - mkdir -p /var/cache/nix - podman run -i --rm -v /var/cache/nix:/mnt/nix:Z \ - $CTR_FQIN cp -rfT /nix /mnt/nix - podman run -i --rm -v /var/cache/nix:/nix:Z \ - -v $PWD:$PWD:Z -w $PWD $CTR_FQIN \ - nix --print-build-logs --option cores 4 --option max-jobs 4 \ - build --file ./nix/ - # result symlink is absolute from container perspective :( - cp /var/cache/$(readlink result)/bin/podman ./ # for cirrus-ci artifact + podman run -i --rm \ + -e CACHIX_AUTH_TOKEN \ + -v $PWD:$PWD:Z -w $PWD $CTR_FQIN sh -c \ + "nix-env -iA cachix -f https://cachix.org/api/v1/install && \ + cachix use podman && \ + nix-build nix && \ + nix-store -qR --include-outputs \$(nix-instantiate nix/default.nix) | grep -v podman | cachix push podman && \ + cp -R result/bin ." rm result # makes cirrus puke ;; *) diff --git a/contrib/cirrus/setup_environment.sh b/contrib/cirrus/setup_environment.sh index 9267b8a1c..4c95d0254 100755 --- a/contrib/cirrus/setup_environment.sh +++ b/contrib/cirrus/setup_environment.sh @@ -181,7 +181,6 @@ esac # shellcheck disable=SC2154 case "$TEST_FLAVOR" in ext_svc) ;; - smoke) ;& validate) # For some reason, this is also needed for validation make .install.pre-commit diff --git a/contrib/rootless-cni-infra/Containerfile b/contrib/rootless-cni-infra/Containerfile index 871e06a6c..4324f39d2 100644 --- a/contrib/rootless-cni-infra/Containerfile +++ b/contrib/rootless-cni-infra/Containerfile @@ -2,7 +2,7 @@ ARG GOLANG_VERSION=1.15 ARG ALPINE_VERSION=3.12 ARG CNI_VERSION=v0.8.0 ARG CNI_PLUGINS_VERSION=v0.8.7 -ARG DNSNAME_VERSION=v1.0.0 +ARG DNSNAME_VERSION=v1.1.1 FROM golang:${GOLANG_VERSION}-alpine${ALPINE_VERSION} AS golang-base RUN apk add --no-cache git @@ -33,4 +33,4 @@ COPY rootless-cni-infra /usr/local/bin ENV CNI_PATH=/opt/cni/bin CMD ["sleep", "infinity"] -ENV ROOTLESS_CNI_INFRA_VERSION=3 +ENV ROOTLESS_CNI_INFRA_VERSION=5 diff --git a/contrib/rootless-cni-infra/rootless-cni-infra b/contrib/rootless-cni-infra/rootless-cni-infra index 463254c7f..cceb8d817 100755 --- a/contrib/rootless-cni-infra/rootless-cni-infra +++ b/contrib/rootless-cni-infra/rootless-cni-infra @@ -21,16 +21,19 @@ wait_unshare_net() { done } -# CLI subcommand: "alloc $CONTAINER_ID $NETWORK_NAME $POD_NAME" +# CLI subcommand: "alloc $CONTAINER_ID $NETWORK_NAME $POD_NAME $IP $MAC $CAP_ARGS" cmd_entrypoint_alloc() { - if [ "$#" -ne 3 ]; then - echo >&2 "Usage: $ARG0 alloc CONTAINER_ID NETWORK_NAME POD_NAME" + if [ "$#" -ne 6 ]; then + echo >&2 "Usage: $ARG0 alloc CONTAINER_ID NETWORK_NAME POD_NAME IP MAC CAP_ARGS" exit 1 fi ID="$1" NET="$2" K8S_POD_NAME="$3" + IP="$4" + MAC="$5" + CAP_ARGS="$6" dir="${BASE}/${ID}" mkdir -p "${dir}/attached" "${dir}/attached-args" @@ -46,9 +49,18 @@ cmd_entrypoint_alloc() { nsenter -t "${pid}" -n ip link set lo up fi 
CNI_ARGS="IgnoreUnknown=1;K8S_POD_NAME=${K8S_POD_NAME}" + if [ "$IP" ]; then + CNI_ARGS="$CNI_ARGS;IP=${IP}" + fi + if [ "$MAC" ]; then + CNI_ARGS="$CNI_ARGS;MAC=${MAC}" + fi + if [ "$CAP_ARGS" ]; then + CAP_ARGS="$CAP_ARGS" + fi nwcount=$(find "${dir}/attached" -type f | wc -l) CNI_IFNAME="eth${nwcount}" - export CNI_ARGS CNI_IFNAME + export CNI_ARGS CNI_IFNAME CAP_ARGS cnitool add "${NET}" "/proc/${pid}/ns/net" >"${dir}/attached/${NET}" echo "${CNI_ARGS}" >"${dir}/attached-args/${NET}" diff --git a/docs/source/markdown/podman-build.1.md b/docs/source/markdown/podman-build.1.md index 61c05fdef..e05678e2c 100644 --- a/docs/source/markdown/podman-build.1.md +++ b/docs/source/markdown/podman-build.1.md @@ -455,9 +455,8 @@ not required for Buildah as it supports only Linux. #### **--pull** -When the option is specified or set to "true", pull the image from the first -registry it is found in as listed in registries.conf. Raise an error if not -found in the registries, even if the image is present locally. +When the option is specified or set to "true", pull the image. Raise an error +if the image could not be pulled, even if the image is present locally. If the option is disabled (with *--pull=false*) or not specified, pull the image from the registry only if the image is not present locally. Raise an diff --git a/docs/source/markdown/podman-network-create.1.md b/docs/source/markdown/podman-network-create.1.md index 2fafd1e31..86b15162a 100644 --- a/docs/source/markdown/podman-network-create.1.md +++ b/docs/source/markdown/podman-network-create.1.md @@ -7,8 +7,9 @@ podman\-network-create - Create a Podman CNI network **podman network create** [*options*] name ## DESCRIPTION -Create a CNI-network configuration for use with Podman. By default, Podman creates a bridge connection. A -*Macvlan* connection can be created with the *macvlan* option. In the case of *Macvlan* connections, the +Create a CNI-network configuration for use with Podman. By default, Podman creates a bridge connection. +A *Macvlan* connection can be created with the *-d macvlan* option. A parent device for macvlan can +be designated with the *-o parent=<device>* option. In the case of *Macvlan* connections, the CNI *dhcp* plugin needs to be activated or the container image must have a DHCP client to interact with the host network's DHCP server. @@ -55,6 +56,8 @@ Set metadata for a network (e.g., --label mykey=value). #### **--macvlan** +*This option is being deprecated* + Create a *Macvlan* based connection rather than a classic bridge. You must pass an interface name from the host for the Macvlan connection. @@ -101,7 +104,7 @@ Create a network that uses a *192.168.55.0/24** subnet and has an IP address ran Create a Macvlan based network using the host interface eth0 ``` -# podman network create --macvlan eth0 newnet +# podman network create -d macvlan -o parent=eth0 newnet /etc/cni/net.d/newnet.conflist ``` diff --git a/docs/source/markdown/podman-push.1.md b/docs/source/markdown/podman-push.1.md index f7624ed5f..3ed5f60c0 100644 --- a/docs/source/markdown/podman-push.1.md +++ b/docs/source/markdown/podman-push.1.md @@ -90,8 +90,7 @@ solely for scripting compatibility. #### **--format**, **-f**=*format* -Manifest Type (oci, v2s1, or v2s2) to use when pushing an image to a directory using the 'dir:' transport (default is manifest type of source) -Note: This flag can only be set when using the **dir** transport +Manifest Type (oci, v2s2, or v2s1) to use when pushing an image. 
#### **--quiet**, **-q** @@ -99,11 +98,11 @@ When writing the output image, suppress progress output #### **--remove-signatures** -Discard any pre-existing signatures in the image +Discard any pre-existing signatures in the image. (Not available for remote commands) #### **--sign-by**=*key* -Add a signature at the destination using the specified key +Add a signature at the destination using the specified key. (Not available for remote commands) #### **--tls-verify**=*true|false* @@ -10,10 +10,10 @@ require ( github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd // indirect github.com/containernetworking/cni v0.8.0 github.com/containernetworking/plugins v0.9.0 - github.com/containers/buildah v1.19.2 + github.com/containers/buildah v1.19.3 github.com/containers/common v0.33.1 github.com/containers/conmon v2.0.20+incompatible - github.com/containers/image/v5 v5.9.0 + github.com/containers/image/v5 v5.10.1 github.com/containers/psgo v1.5.2 github.com/containers/storage v1.24.5 github.com/coreos/go-systemd/v22 v22.1.0 @@ -38,7 +38,7 @@ require ( github.com/kr/text v0.2.0 // indirect github.com/moby/term v0.0.0-20201110203204-bea5bbe245bf github.com/mrunalp/fileutils v0.0.0-20171103030105-7d4729fb3618 - github.com/onsi/ginkgo v1.14.2 + github.com/onsi/ginkgo v1.15.0 github.com/onsi/gomega v1.10.4 github.com/opencontainers/go-digest v1.0.0 github.com/opencontainers/image-spec v1.0.2-0.20190823105129-775207bd45b6 @@ -49,7 +49,7 @@ require ( github.com/opentracing/opentracing-go v1.2.0 github.com/pkg/errors v0.9.1 github.com/pmezard/go-difflib v1.0.0 - github.com/rootless-containers/rootlesskit v0.12.0 + github.com/rootless-containers/rootlesskit v0.13.0 github.com/sirupsen/logrus v1.7.0 github.com/spf13/cobra v1.1.1 github.com/spf13/pflag v1.0.5 @@ -95,14 +95,16 @@ github.com/containernetworking/cni v0.8.0/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ github.com/containernetworking/plugins v0.8.7/go.mod h1:R7lXeZaBzpfqapcAbHRW8/CYwm0dHzbz0XEjofx0uB0= github.com/containernetworking/plugins v0.9.0 h1:c+1gegKhR7+d0Caum9pEHugZlyhXPOG6v3V6xJgIGCI= github.com/containernetworking/plugins v0.9.0/go.mod h1:dbWv4dI0QrBGuVgj+TuVQ6wJRZVOhrCQj91YyC92sxg= -github.com/containers/buildah v1.19.2 h1:1/ePUtinuqTPSwXiZXPyBJmik688l1e4SUZsoOv716w= -github.com/containers/buildah v1.19.2/go.mod h1:zUMKdtZu4rs6lgKHheKwo+wBlh5ZL+1+/5/IsaNTD74= +github.com/containers/buildah v1.19.3 h1:U0E1UKzqW5C11W7giHhLZI06xkZiV40ZKDK/c1jotbE= +github.com/containers/buildah v1.19.3/go.mod h1:uZb6GuE36tmRSOcIXGfiYqdpr+GPXWmlUIJSk5sn19w= github.com/containers/common v0.33.1 h1:XpDiq8Cta8+u1s4kpYSEWdB140ZmqgyIXfWkLqKx3z0= github.com/containers/common v0.33.1/go.mod h1:mjDo/NKeweL/onaspLhZ38WnHXaYmrELHclIdvSnYpY= github.com/containers/conmon v2.0.20+incompatible h1:YbCVSFSCqFjjVwHTPINGdMX1F6JXHGTUje2ZYobNrkg= github.com/containers/conmon v2.0.20+incompatible/go.mod h1:hgwZ2mtuDrppv78a/cOBNiCm6O0UMWGx1mu7P00nu5I= github.com/containers/image/v5 v5.9.0 h1:dRmUtcluQcmasNo3DpnRoZjfU0rOu1qZeL6wlDJr10Q= github.com/containers/image/v5 v5.9.0/go.mod h1:blOEFd/iFdeyh891ByhCVUc+xAcaI3gBegXECwz9UbQ= +github.com/containers/image/v5 v5.10.1 h1:tHhGQ8RCMxJfJLD/PEW1qrOKX8nndledW9qz6UiAxns= +github.com/containers/image/v5 v5.10.1/go.mod h1:JlRLJZv7elVbtHaaaR6Kz8i6G3k2ttj4t7fubwxD9Hs= github.com/containers/libtrust v0.0.0-20190913040956-14b96171aa3b h1:Q8ePgVfHDplZ7U33NwHZkrVELsZP5fYj9pM5WBZB2GE= github.com/containers/libtrust v0.0.0-20190913040956-14b96171aa3b/go.mod h1:9rfv8iPl1ZP7aqh9YA68wnZv2NUDbXdcdPHVz0pFbPY= github.com/containers/ocicrypt 
v1.0.3 h1:vYgl+RZ9Q3DPMuTfxmN+qp0X2Bj52uuY2vnt6GzVe1c= @@ -261,8 +263,6 @@ github.com/google/shlex v0.0.0-20181106134648-c34317bd91bf/go.mod h1:RpwtwJQFrIE github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.1.5 h1:kxhtnfFVi+rYdOALN0B3k9UT86zVJKfBimRaciULW4I= -github.com/google/uuid v1.1.5/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.2.0 h1:qJYtXnJRWmpe7m/3XlyhrsLrEURqHRM2kxzoxXqyUDs= github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= @@ -340,6 +340,8 @@ github.com/klauspost/compress v1.11.1/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYs github.com/klauspost/compress v1.11.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= github.com/klauspost/compress v1.11.5 h1:xNCE0uE6yvTPRS+0wGNMHPo3NIpwnk6aluQZ6R6kRcc= github.com/klauspost/compress v1.11.5/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= +github.com/klauspost/compress v1.11.7 h1:0hzRabrMN4tSTvMfnL3SCv1ZGeAP23ynzodBgaHeMeg= +github.com/klauspost/compress v1.11.7/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= github.com/klauspost/pgzip v1.2.5 h1:qnWYvvKqedOF2ulHpMG72XQol4ILEJ8k2wwRl/Km8oE= github.com/klauspost/pgzip v1.2.5/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= @@ -430,6 +432,8 @@ github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+ github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= github.com/onsi/ginkgo v1.14.2 h1:8mVmC9kjFFmA8H4pKMUhcblgifdkOIXPvbhN1T36q1M= github.com/onsi/ginkgo v1.14.2/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= +github.com/onsi/ginkgo v1.15.0 h1:1V1NfVQR87RtWAgp1lv9JZJ5Jap+XFGKPi00andXGi4= +github.com/onsi/ginkgo v1.15.0/go.mod h1:hF8qUzuuC8DJGygJH3726JnCZX4MYbRB8yFfISqnKUg= github.com/onsi/gomega v0.0.0-20151007035656-2152b45fa28a/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v0.0.0-20190113212917-5533ce8a0da3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= @@ -509,8 +513,8 @@ github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDa github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/rootless-containers/rootlesskit v0.12.0 h1:nEVacjGda4wBYnPLWsmZSeA+mFwajGorCuA4VHmu2OE= -github.com/rootless-containers/rootlesskit v0.12.0/go.mod h1:swXXhX7XMmJQvgqYlaezs6M6vkHN851uPXRStsFr8ug= +github.com/rootless-containers/rootlesskit v0.13.0 h1:41nnfB7yFxtHSeQHYupSvVxAJWh/hjmn03w6UjH7nv8= +github.com/rootless-containers/rootlesskit v0.13.0/go.mod h1:DwE/9ASct8sj7bueOXqKiwcdzyZ+yV6qhTAtJUO7988= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= 
github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= @@ -571,6 +575,8 @@ github.com/uber/jaeger-lib v2.2.0+incompatible h1:MxZXOiR2JuoANZ3J6DE/U0kSFv/eJ/ github.com/uber/jaeger-lib v2.2.0+incompatible/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U= github.com/ulikunitz/xz v0.5.8 h1:ERv8V6GKqVi23rgu5cj9pVfVzJbOqAY2Ntl88O6c2nQ= github.com/ulikunitz/xz v0.5.8/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= +github.com/ulikunitz/xz v0.5.9 h1:RsKRIA2MO8x56wkkcd3LbtcE/uMszhb6DpRf+3uwa3I= +github.com/ulikunitz/xz v0.5.9/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= github.com/urfave/cli v0.0.0-20171014202726-7bc6a0acffa5/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= @@ -579,6 +585,8 @@ github.com/vbatts/tar-split v0.11.1 h1:0Odu65rhcZ3JZaPHxl7tCI3V/C/Q9Zf82UFravl02 github.com/vbatts/tar-split v0.11.1/go.mod h1:LEuURwDEiWjRjwu46yU3KVGuUdVv/dcnpcEPSzR8z6g= github.com/vbauerster/mpb/v5 v5.3.0 h1:vgrEJjUzHaSZKDRRxul5Oh4C72Yy/5VEMb0em+9M0mQ= github.com/vbauerster/mpb/v5 v5.3.0/go.mod h1:4yTkvAb8Cm4eylAp6t0JRq6pXDkFJ4krUlDqWYkakAs= +github.com/vbauerster/mpb/v5 v5.4.0 h1:n8JPunifvQvh6P1D1HAl2Ur9YcmKT1tpoUuiea5mlmg= +github.com/vbauerster/mpb/v5 v5.4.0/go.mod h1:fi4wVo7BVQ22QcvFObm+VwliQXlV1eBT8JDaKXR4JGI= github.com/vishvananda/netlink v0.0.0-20181108222139-023a6dafdcdf/go.mod h1:+SR5DhBJrl6ZM7CoCKvpw5BKroDKQ+PJqOg65H/2ktk= github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE= github.com/vishvananda/netlink v1.1.1-0.20201029203352-d40f9887b852 h1:cPXZWzzG0NllBLdjWoD1nDfaqu98YMv+OneaKc8sPOA= @@ -598,6 +606,7 @@ github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1: github.com/xeipuuv/gojsonschema v1.2.0 h1:LhYJRs+L4fBtjZUfuSZIKGeVu0QRy8e5Xi7D17UxZ74= github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/bbolt v1.3.5 h1:XAzx9gjCb0Rxj7EoqcClPD1d5ZBxZJk0jbuoPHenBt0= go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= @@ -621,6 +630,7 @@ golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200423211502-4bdfaf469ed5/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200429183012-4b2356b1ed79/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= @@ -644,6 +654,7 @@ golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU golang.org/x/mobile 
v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -671,6 +682,7 @@ golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/ golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20201006153459-a7d1128ccaa0/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201010224723-4f7140c49acb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb h1:eBmm0M9fYhWpKZLjQUUKka/LtIxf46G4fxeEz5KJr9U= golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= @@ -744,6 +756,8 @@ golang.org/x/sys v0.0.0-20201117170446-d9b008d0a637/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3 h1:kzM6+9dur93BcC2kVlYl34cHU+TYZLanmpSJHVMmL64= golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201218084310-7d0127a74742/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4 h1:myAQVi0cGEoqQVR5POX+8RR2mrocKqNN1hmeMqhX27k= golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= @@ -782,7 +796,11 @@ golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtn golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e h1:4nW4NLDYnU28ojHaHO8OVxFHk/aQ33U01a9cjED+pzE= +golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= 
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= diff --git a/hack/get_ci_vm.sh b/hack/get_ci_vm.sh index d1e38eb35..4f6c42a06 100755 --- a/hack/get_ci_vm.sh +++ b/hack/get_ci_vm.sh @@ -157,11 +157,14 @@ parse_args(){ VM_IMAGE_NAME="$1" - # Word-splitting is desirable in this case - # shellcheck disable=SC2207 + # Word-splitting is desirable in this case. + # Values are used literally (with '=') as args to future `env` command. + # get_env_vars() will take care of properly quoting its output. + # shellcheck disable=SC2207,SC2191 ENVS=( $(get_env_vars) - "VM_IMAGE_NAME=$VM_IMAGE_NAME" + VM_IMAGE_NAME="$VM_IMAGE_NAME" + UPSTREAM_REMOTE="upstream" ) VMNAME="${VMNAME:-${USER}-${VM_IMAGE_NAME}}" @@ -263,7 +266,7 @@ echo -e "Note: Script can be re-used in another terminal if needed." echo -e "${RED}(option to delete VM presented upon exiting).${NOR}" # TODO: This is fairly fragile, specifically the quoting for the remote command. echo '#!/bin/bash' > $TMPDIR/ssh -echo "$SSH_CMD -- -t 'cd $GOSRC && exec env \"${ENVS[*]}\" bash -il'" >> $TMPDIR/ssh +echo "$SSH_CMD -- -t 'cd $GOSRC && exec env ${ENVS[*]} bash -il'" >> $TMPDIR/ssh chmod +x $TMPDIR/ssh showrun $TMPDIR/ssh diff --git a/libpod/container.go b/libpod/container.go index 58bf95470..ed7535bc8 100644 --- a/libpod/container.go +++ b/libpod/container.go @@ -1073,6 +1073,18 @@ func networkDisabled(c *Container) (bool, error) { return false, nil } +func (c *Container) HostNetwork() bool { + if c.config.CreateNetNS || c.config.NetNsCtr != "" { + return false + } + for _, ns := range c.config.Spec.Linux.Namespaces { + if ns.Type == spec.NetworkNamespace { + return false + } + } + return true +} + // ContainerState returns containerstate struct func (c *Container) ContainerState() (*ContainerState, error) { if !c.batched { diff --git a/libpod/kube.go b/libpod/kube.go index 753c58099..bf314b9a3 100644 --- a/libpod/kube.go +++ b/libpod/kube.go @@ -49,6 +49,7 @@ func (p *Pod) GenerateForKube() (*v1.Pod, []v1.ServicePort, error) { } extraHost := make([]v1.HostAlias, 0) + hostNetwork := false if p.HasInfraContainer() { infraContainer, err := p.getInfraContainer() if err != nil { @@ -69,9 +70,9 @@ func (p *Pod) GenerateForKube() (*v1.Pod, []v1.ServicePort, error) { return nil, servicePorts, err } servicePorts = containerPortsToServicePorts(ports) - + hostNetwork = p.config.InfraContainer.HostNetwork } - pod, err := p.podWithContainers(allContainers, ports) + pod, err := p.podWithContainers(allContainers, ports, hostNetwork) if err != nil { return nil, servicePorts, err } @@ -167,13 +168,14 @@ func containersToServicePorts(containers []v1.Container) []v1.ServicePort { return sps } -func (p *Pod) podWithContainers(containers []*Container, ports []v1.ContainerPort) (*v1.Pod, error) { +func (p *Pod) podWithContainers(containers []*Container, ports []v1.ContainerPort, hostNetwork bool) (*v1.Pod, error) { deDupPodVolumes := make(map[string]*v1.Volume) first := true podContainers := make([]v1.Container, 0, len(containers)) + dnsInfo := v1.PodDNSConfig{} for _, ctr := range containers { if !ctr.IsInfra() { - ctr, volumes, err := containerToV1Container(ctr) + ctr, volumes, _, err := containerToV1Container(ctr) if err != nil { return nil, err } @@ -196,6 +198,22 @@ func (p *Pod) podWithContainers(containers []*Container, ports []v1.ContainerPor vol := vol deDupPodVolumes[vol.Name] = &vol } + } else {
+ _, _, infraDNS, err := containerToV1Container(ctr) + if err != nil { + return nil, err + } + if infraDNS != nil { + if servers := infraDNS.Nameservers; len(servers) > 0 { + dnsInfo.Nameservers = servers + } + if searches := infraDNS.Searches; len(searches) > 0 { + dnsInfo.Searches = searches + } + if options := infraDNS.Options; len(options) > 0 { + dnsInfo.Options = options + } + } } } podVolumes := make([]v1.Volume, 0, len(deDupPodVolumes)) @@ -203,10 +221,10 @@ func (p *Pod) podWithContainers(containers []*Container, ports []v1.ContainerPor podVolumes = append(podVolumes, *vol) } - return addContainersAndVolumesToPodObject(podContainers, podVolumes, p.Name()), nil + return addContainersAndVolumesToPodObject(podContainers, podVolumes, p.Name(), &dnsInfo, hostNetwork), nil } -func addContainersAndVolumesToPodObject(containers []v1.Container, volumes []v1.Volume, podName string) *v1.Pod { +func addContainersAndVolumesToPodObject(containers []v1.Container, volumes []v1.Volume, podName string, dnsOptions *v1.PodDNSConfig, hostNetwork bool) *v1.Pod { tm := v12.TypeMeta{ Kind: "Pod", APIVersion: "v1", @@ -225,8 +243,12 @@ func addContainersAndVolumesToPodObject(containers []v1.Container, volumes []v1. CreationTimestamp: v12.Now(), } ps := v1.PodSpec{ - Containers: containers, - Volumes: volumes, + Containers: containers, + Volumes: volumes, + HostNetwork: hostNetwork, + } + if dnsOptions != nil { + ps.DNSConfig = dnsOptions } p := v1.Pod{ TypeMeta: tm, @@ -241,32 +263,69 @@ func addContainersAndVolumesToPodObject(containers []v1.Container, volumes []v1. func simplePodWithV1Containers(ctrs []*Container) (*v1.Pod, error) { kubeCtrs := make([]v1.Container, 0, len(ctrs)) kubeVolumes := make([]v1.Volume, 0) + hostNetwork := true + podDNS := v1.PodDNSConfig{} for _, ctr := range ctrs { - kubeCtr, kubeVols, err := containerToV1Container(ctr) + if !ctr.HostNetwork() { + hostNetwork = false + } + kubeCtr, kubeVols, ctrDNS, err := containerToV1Container(ctr) if err != nil { return nil, err } kubeCtrs = append(kubeCtrs, kubeCtr) kubeVolumes = append(kubeVolumes, kubeVols...) - } - return addContainersAndVolumesToPodObject(kubeCtrs, kubeVolumes, strings.ReplaceAll(ctrs[0].Name(), "_", "")), nil + // Combine DNS information in sum'd structure + if ctrDNS != nil { + // nameservers + if servers := ctrDNS.Nameservers; servers != nil { + if podDNS.Nameservers == nil { + podDNS.Nameservers = make([]string, 0) + } + for _, s := range servers { + if !util.StringInSlice(s, podDNS.Nameservers) { // only append if it does not exist + podDNS.Nameservers = append(podDNS.Nameservers, s) + } + } + } + // search domains + if domains := ctrDNS.Searches; domains != nil { + if podDNS.Searches == nil { + podDNS.Searches = make([]string, 0) + } + for _, d := range domains { + if !util.StringInSlice(d, podDNS.Searches) { // only append if it does not exist + podDNS.Searches = append(podDNS.Searches, d) + } + } + } + // dns options + if options := ctrDNS.Options; options != nil { + if podDNS.Options == nil { + podDNS.Options = make([]v1.PodDNSConfigOption, 0) + } + podDNS.Options = append(podDNS.Options, options...) + } + } // end if ctrDNS + } + return addContainersAndVolumesToPodObject(kubeCtrs, kubeVolumes, strings.ReplaceAll(ctrs[0].Name(), "_", ""), &podDNS, hostNetwork), nil } // containerToV1Container converts information we know about a libpod container // to a V1.Container specification. 
-func containerToV1Container(c *Container) (v1.Container, []v1.Volume, error) { +func containerToV1Container(c *Container) (v1.Container, []v1.Volume, *v1.PodDNSConfig, error) { kubeContainer := v1.Container{} kubeVolumes := []v1.Volume{} kubeSec, err := generateKubeSecurityContext(c) if err != nil { - return kubeContainer, kubeVolumes, err + return kubeContainer, kubeVolumes, nil, err } if len(c.config.Spec.Linux.Devices) > 0 { // TODO Enable when we can support devices and their names kubeContainer.VolumeDevices = generateKubeVolumeDeviceFromLinuxDevice(c.Spec().Linux.Devices) - return kubeContainer, kubeVolumes, errors.Wrapf(define.ErrNotImplemented, "linux devices") + return kubeContainer, kubeVolumes, nil, errors.Wrapf(define.ErrNotImplemented, "linux devices") } if len(c.config.UserVolumes) > 0 { @@ -274,7 +333,7 @@ func containerToV1Container(c *Container) (v1.Container, []v1.Volume, error) { // Volume names need to be coordinated "globally" in the kube files. volumeMounts, volumes, err := libpodMountsToKubeVolumeMounts(c) if err != nil { - return kubeContainer, kubeVolumes, err + return kubeContainer, kubeVolumes, nil, err } kubeContainer.VolumeMounts = volumeMounts kubeVolumes = append(kubeVolumes, volumes...) @@ -282,16 +341,16 @@ func containerToV1Container(c *Container) (v1.Container, []v1.Volume, error) { envVariables, err := libpodEnvVarsToKubeEnvVars(c.config.Spec.Process.Env) if err != nil { - return kubeContainer, kubeVolumes, err + return kubeContainer, kubeVolumes, nil, err } portmappings, err := c.PortMappings() if err != nil { - return kubeContainer, kubeVolumes, err + return kubeContainer, kubeVolumes, nil, err } ports, err := ocicniPortMappingToContainerPort(portmappings) if err != nil { - return kubeContainer, kubeVolumes, err + return kubeContainer, kubeVolumes, nil, err } containerCommands := c.Command() @@ -355,7 +414,38 @@ func containerToV1Container(c *Container) (v1.Container, []v1.Volume, error) { } } - return kubeContainer, kubeVolumes, nil + // Obtain the DNS entries from the container + dns := v1.PodDNSConfig{} + + // DNS servers + if servers := c.config.DNSServer; len(servers) > 0 { + dnsServers := make([]string, 0) + for _, server := range servers { + dnsServers = append(dnsServers, server.String()) + } + dns.Nameservers = dnsServers + } + + // DNS search domains + if searches := c.config.DNSSearch; len(searches) > 0 { + dns.Searches = searches + } + + // DNS options + if options := c.config.DNSOption; len(options) > 0 { + dnsOptions := make([]v1.PodDNSConfigOption, 0) + for _, option := range options { + // the option can be "k:v" or just "k", no delimiter is required + opts := strings.SplitN(option, ":", 2) + dnsOpt := v1.PodDNSConfigOption{ + Name: opts[0], + Value: &opts[1], + } + dnsOptions = append(dnsOptions, dnsOpt) + } + dns.Options = dnsOptions + } + return kubeContainer, kubeVolumes, &dns, nil } // ocicniPortMappingToContainerPort takes an ocicni portmapping and converts diff --git a/libpod/network/create.go b/libpod/network/create.go index a8f985af9..88310a79c 100644 --- a/libpod/network/create.go +++ b/libpod/network/create.go @@ -29,7 +29,7 @@ func Create(name string, options entities.NetworkCreateOptions, runtimeConfig *c return nil, err } defer l.releaseCNILock() - if len(options.MacVLAN) > 0 { + if len(options.MacVLAN) > 0 || options.Driver == MacVLANNetworkDriver { fileName, err = createMacVLAN(name, options, runtimeConfig) } else { fileName, err = createBridge(name, options, runtimeConfig) @@ -256,9 +256,17 @@ func createMacVLAN(name 
string, options entities.NetworkCreateOptions, runtimeCo return "", err } - // Make sure the host-device exists - if !util.StringInSlice(options.MacVLAN, liveNetNames) { - return "", errors.Errorf("failed to find network interface %q", options.MacVLAN) + // The parent can be defined with --macvlan or as an option (-o parent:device) + parentNetworkDevice := options.MacVLAN + if len(parentNetworkDevice) < 1 { + if parent, ok := options.Options["parent"]; ok { + parentNetworkDevice = parent + } + } + + // Make sure the host-device exists if provided + if len(parentNetworkDevice) > 0 && !util.StringInSlice(parentNetworkDevice, liveNetNames) { + return "", errors.Errorf("failed to find network interface %q", parentNetworkDevice) } if len(name) > 0 { netNames, err := GetNetworkNamesFromFileSystem(runtimeConfig) @@ -275,7 +283,7 @@ func createMacVLAN(name string, options entities.NetworkCreateOptions, runtimeCo } } ncList := NewNcList(name, version.Current(), options.Labels) - macvlan := NewMacVLANPlugin(options.MacVLAN) + macvlan := NewMacVLANPlugin(parentNetworkDevice) plugins = append(plugins, macvlan) ncList["plugins"] = plugins b, err := json.MarshalIndent(ncList, "", " ") diff --git a/libpod/network/netconflist.go b/libpod/network/netconflist.go index 165a9067b..ca6a4a70b 100644 --- a/libpod/network/netconflist.go +++ b/libpod/network/netconflist.go @@ -177,9 +177,13 @@ func NewMacVLANPlugin(device string) MacVLANConfig { m := MacVLANConfig{ PluginType: "macvlan", - Master: device, IPAM: i, } + // CNI is supposed to use the default route if a + // parent device is not provided + if len(device) > 0 { + m.Master = device + } return m } diff --git a/libpod/network/network.go b/libpod/network/network.go index 0fb878b18..0ff14c1f7 100644 --- a/libpod/network/network.go +++ b/libpod/network/network.go @@ -17,11 +17,17 @@ import ( "github.com/sirupsen/logrus" ) -// DefaultNetworkDriver is the default network type used -var DefaultNetworkDriver = "bridge" +var ( + // BridgeNetworkDriver defines the bridge cni driver + BridgeNetworkDriver = "bridge" + // DefaultNetworkDriver is the default network type used + DefaultNetworkDriver = BridgeNetworkDriver + // MacVLANNetworkDriver defines the macvlan cni driver + MacVLANNetworkDriver = "macvlan" +) // SupportedNetworkDrivers describes the list of supported drivers -var SupportedNetworkDrivers = []string{DefaultNetworkDriver} +var SupportedNetworkDrivers = []string{BridgeNetworkDriver, MacVLANNetworkDriver} // isSupportedDriver checks if the user provided driver is supported func isSupportedDriver(driver string) error { diff --git a/libpod/oci_conmon_exec_linux.go b/libpod/oci_conmon_exec_linux.go index dc5dd03df..faf86ea5b 100644 --- a/libpod/oci_conmon_exec_linux.go +++ b/libpod/oci_conmon_exec_linux.go @@ -126,20 +126,25 @@ func (r *ConmonOCIRuntime) ExecContainerHTTP(ctr *Container, sessionID string, o }() attachChan := make(chan error) + conmonPipeDataChan := make(chan conmonPipeData) go func() { // attachToExec is responsible for closing pipes - attachChan <- attachExecHTTP(ctr, sessionID, req, w, streams, pipes, detachKeys, options.Terminal, cancel, hijackDone, holdConnOpen) + attachChan <- attachExecHTTP(ctr, sessionID, req, w, streams, pipes, detachKeys, options.Terminal, cancel, hijackDone, holdConnOpen, execCmd, conmonPipeDataChan, ociLog) close(attachChan) }() - // Wait for conmon to succeed, when return. 
- if err := execCmd.Wait(); err != nil { - return -1, nil, errors.Wrapf(err, "cannot run conmon") - } + // NOTE: the channel is needed to communicate conmon's data. In case + // of an error, the error will be written on the hijacked http + // connection such that remote clients will receive the error. + pipeData := <-conmonPipeDataChan - pid, err := readConmonPipeData(pipes.syncPipe, ociLog) + return pipeData.pid, attachChan, pipeData.err +} - return pid, attachChan, err +// conmonPipeData contains the data when reading from conmon's pipe. +type conmonPipeData struct { + pid int + err error } // ExecContainerDetached executes a command in a running container, but does @@ -488,9 +493,16 @@ func (r *ConmonOCIRuntime) startExec(c *Container, sessionID string, options *Ex } // Attach to a container over HTTP -func attachExecHTTP(c *Container, sessionID string, r *http.Request, w http.ResponseWriter, streams *HTTPAttachStreams, pipes *execPipes, detachKeys []byte, isTerminal bool, cancel <-chan bool, hijackDone chan<- bool, holdConnOpen <-chan bool) (deferredErr error) { +func attachExecHTTP(c *Container, sessionID string, r *http.Request, w http.ResponseWriter, streams *HTTPAttachStreams, pipes *execPipes, detachKeys []byte, isTerminal bool, cancel <-chan bool, hijackDone chan<- bool, holdConnOpen <-chan bool, execCmd *exec.Cmd, conmonPipeDataChan chan<- conmonPipeData, ociLog string) (deferredErr error) { + // NOTE: As you may notice, the attach code is quite complex. + // Many things happen concurrently and yet are interdependent. + // If you ever change this function, make sure to write to the + // conmonPipeDataChan in case of an error. + if pipes == nil || pipes.startPipe == nil || pipes.attachPipe == nil { - return errors.Wrapf(define.ErrInvalidArg, "must provide a start and attach pipe to finish an exec attach") + err := errors.Wrapf(define.ErrInvalidArg, "must provide a start and attach pipe to finish an exec attach") + conmonPipeDataChan <- conmonPipeData{-1, err} + return err } defer func() { @@ -509,17 +521,20 @@ func attachExecHTTP(c *Container, sessionID string, r *http.Request, w http.Resp // set up the socket path, such that it is the correct length and location for exec sockPath, err := c.execAttachSocketPath(sessionID) if err != nil { + conmonPipeDataChan <- conmonPipeData{-1, err} return err } // 2: read from attachFd that the parent process has set up the console socket if _, err := readConmonPipeData(pipes.attachPipe, ""); err != nil { + conmonPipeDataChan <- conmonPipeData{-1, err} return err } // 2: then attach conn, err := openUnixSocket(sockPath) if err != nil { + conmonPipeDataChan <- conmonPipeData{-1, err} return errors.Wrapf(err, "failed to connect to container's attach socket: %v", sockPath) } defer func() { @@ -540,11 +555,13 @@ func attachExecHTTP(c *Container, sessionID string, r *http.Request, w http.Resp // Perform hijack hijacker, ok := w.(http.Hijacker) if !ok { + conmonPipeDataChan <- conmonPipeData{-1, err} return errors.Errorf("unable to hijack connection") } httpCon, httpBuf, err := hijacker.Hijack() if err != nil { + conmonPipeDataChan <- conmonPipeData{-1, err} return errors.Wrapf(err, "error hijacking connection") } @@ -555,10 +572,23 @@ func attachExecHTTP(c *Container, sessionID string, r *http.Request, w http.Resp // Force a flush after the header is written. 
if err := httpBuf.Flush(); err != nil { + conmonPipeDataChan <- conmonPipeData{-1, err} return errors.Wrapf(err, "error flushing HTTP hijack header") } go func() { + // Wait for conmon to succeed, when return. + if err := execCmd.Wait(); err != nil { + conmonPipeDataChan <- conmonPipeData{-1, err} + } else { + pid, err := readConmonPipeData(pipes.syncPipe, ociLog) + if err != nil { + hijackWriteError(err, c.ID(), isTerminal, httpBuf) + conmonPipeDataChan <- conmonPipeData{pid, err} + } else { + conmonPipeDataChan <- conmonPipeData{pid, err} + } + } // We need to hold the connection open until the complete exec // function has finished. This channel will be closed in a defer // in that function, so we can wait for it here. diff --git a/libpod/rootless_cni_linux.go b/libpod/rootless_cni_linux.go index 9a980750f..94ae062aa 100644 --- a/libpod/rootless_cni_linux.go +++ b/libpod/rootless_cni_linux.go @@ -25,7 +25,7 @@ import ( // Built from ../contrib/rootless-cni-infra. var rootlessCNIInfraImage = map[string]string{ - "amd64": "quay.io/libpod/rootless-cni-infra@sha256:304742d5d221211df4ec672807a5842ff11e3729c50bc424ea0cea858f69d7b7", // 3-amd64 + "amd64": "quay.io/libpod/rootless-cni-infra@sha256:adf352454666f7ce9ca3e1098448b5ee18f89c4516471ec99447ec9ece917f36", // 5-amd64 } const ( @@ -58,9 +58,33 @@ func AllocRootlessCNI(ctx context.Context, c *Container) (ns.NetNS, []*cnitypes. return nil, nil, err } k8sPodName := getCNIPodName(c) // passed to CNI as K8S_POD_NAME + ip := "" + if c.config.StaticIP != nil { + ip = c.config.StaticIP.String() + } + mac := "" + if c.config.StaticMAC != nil { + mac = c.config.StaticMAC.String() + } + aliases, err := c.runtime.state.GetAllNetworkAliases(c) + if err != nil { + return nil, nil, err + } + capArgs := "" + // add network aliases json encoded as capabilityArgs for cni + if len(aliases) > 0 { + capabilityArgs := make(map[string]interface{}) + capabilityArgs["aliases"] = aliases + b, err := json.Marshal(capabilityArgs) + if err != nil { + return nil, nil, err + } + capArgs = string(b) + } + cniResults := make([]*cnitypes.Result, len(networks)) for i, nw := range networks { - cniRes, err := rootlessCNIInfraCallAlloc(infra, c.ID(), nw, k8sPodName) + cniRes, err := rootlessCNIInfraCallAlloc(infra, c.ID(), nw, k8sPodName, ip, mac, capArgs) if err != nil { return nil, nil, err } @@ -137,11 +161,11 @@ func getCNIPodName(c *Container) string { return c.Name() } -func rootlessCNIInfraCallAlloc(infra *Container, id, nw, k8sPodName string) (*cnitypes.Result, error) { - logrus.Debugf("rootless CNI: alloc %q, %q, %q", id, nw, k8sPodName) +func rootlessCNIInfraCallAlloc(infra *Container, id, nw, k8sPodName, ip, mac, capArgs string) (*cnitypes.Result, error) { + logrus.Debugf("rootless CNI: alloc %q, %q, %q, %q, %q, %q", id, nw, k8sPodName, ip, mac, capArgs) var err error - _, err = rootlessCNIInfraExec(infra, "alloc", id, nw, k8sPodName) + _, err = rootlessCNIInfraExec(infra, "alloc", id, nw, k8sPodName, ip, mac, capArgs) if err != nil { return nil, err } diff --git a/libpod/util.go b/libpod/util.go index bf9bf2542..391208fb9 100644 --- a/libpod/util.go +++ b/libpod/util.go @@ -235,20 +235,16 @@ func checkDependencyContainer(depCtr, ctr *Container) error { return nil } -// hijackWriteErrorAndClose writes an error to a hijacked HTTP session and -// closes it. Intended to HTTPAttach function. -// If error is nil, it will not be written; we'll only close the connection. 
-func hijackWriteErrorAndClose(toWrite error, cid string, terminal bool, httpCon io.Closer, httpBuf *bufio.ReadWriter) { +// hijackWriteError writes an error to a hijacked HTTP session. +func hijackWriteError(toWrite error, cid string, terminal bool, httpBuf *bufio.ReadWriter) { if toWrite != nil { - errString := []byte(fmt.Sprintf("%v\n", toWrite)) + errString := []byte(fmt.Sprintf("Error: %v\n", toWrite)) if !terminal { // We need a header. header := makeHTTPAttachHeader(2, uint32(len(errString))) if _, err := httpBuf.Write(header); err != nil { logrus.Errorf("Error writing header for container %s attach connection error: %v", cid, err) } - // TODO: May want to return immediately here to avoid - // writing garbage to the socket? } if _, err := httpBuf.Write(errString); err != nil { logrus.Errorf("Error writing error to container %s HTTP attach connection: %v", cid, err) @@ -257,6 +253,13 @@ func hijackWriteErrorAndClose(toWrite error, cid string, terminal bool, httpCon logrus.Errorf("Error flushing HTTP buffer for container %s HTTP attach connection: %v", cid, err) } } +} + +// hijackWriteErrorAndClose writes an error to a hijacked HTTP session and +// closes it. Intended to HTTPAttach function. +// If error is nil, it will not be written; we'll only close the connection. +func hijackWriteErrorAndClose(toWrite error, cid string, terminal bool, httpCon io.Closer, httpBuf *bufio.ReadWriter) { + hijackWriteError(toWrite, cid, terminal, httpBuf) if err := httpCon.Close(); err != nil { logrus.Errorf("Error closing container %s HTTP attach connection: %v", cid, err) diff --git a/nix/default.nix b/nix/default.nix index 13b4585ea..7745d8b50 100644 --- a/nix/default.nix +++ b/nix/default.nix @@ -49,9 +49,11 @@ let buildPhase = '' patchShebangs . make bin/podman + make bin/podman-remote ''; installPhase = '' install -Dm755 bin/podman $out/bin/podman + install -Dm755 bin/podman-remote $out/bin/podman-remote ''; }; in self diff --git a/nix/nixpkgs.json b/nix/nixpkgs.json index d304de536..0cfb251f2 100644 --- a/nix/nixpkgs.json +++ b/nix/nixpkgs.json @@ -1,9 +1,9 @@ { "url": "https://github.com/nixos/nixpkgs", - "rev": "4a75203f0270f96cbc87f5dfa5d5185690237d87", - "date": "2020-12-29T03:18:48+01:00", - "path": "/nix/store/scswsm6r4jnhp9ki0f6s81kpj5x6jkn7-nixpkgs", - "sha256": "0h70fm9aa7s06wkalbadw70z5rscbs3p6nblb47z523nhlzgjxk9", + "rev": "ce7b327a52d1b82f82ae061754545b1c54b06c66", + "date": "2021-01-25T11:28:05+01:00", + "path": "/nix/store/dpsa6a1sy8hwhwjkklc52brs9z1k5fx9-nixpkgs", + "sha256": "1rc4if8nmy9lrig0ddihdwpzg2s8y36vf20hfywb8hph5hpsg4vj", "fetchSubmodules": false, "deepClone": false, "leaveDotGit": false diff --git a/pkg/api/handlers/compat/containers.go b/pkg/api/handlers/compat/containers.go index aa12afc82..b41987800 100644 --- a/pkg/api/handlers/compat/containers.go +++ b/pkg/api/handlers/compat/containers.go @@ -22,6 +22,7 @@ import ( "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/container" "github.com/docker/go-connections/nat" + "github.com/docker/go-units" "github.com/gorilla/mux" "github.com/gorilla/schema" "github.com/pkg/errors" @@ -31,11 +32,11 @@ import ( func RemoveContainer(w http.ResponseWriter, r *http.Request) { decoder := r.Context().Value("decoder").(*schema.Decoder) query := struct { - All bool `schema:"all"` - Force bool `schema:"force"` - Ignore bool `schema:"ignore"` - Link bool `schema:"link"` - Volumes bool `schema:"v"` + Force bool `schema:"force"` + Ignore bool `schema:"ignore"` + Link bool `schema:"link"` + DockerVolumes bool 
`schema:"v"` + LibpodVolumes bool `schema:"volumes"` }{ // override any golang type defaults } @@ -46,10 +47,19 @@ func RemoveContainer(w http.ResponseWriter, r *http.Request) { return } - if query.Link && !utils.IsLibpodRequest(r) { - utils.Error(w, http.StatusText(http.StatusBadRequest), http.StatusBadRequest, - utils.ErrLinkNotSupport) - return + options := entities.RmOptions{ + Force: query.Force, + Ignore: query.Ignore, + } + if utils.IsLibpodRequest(r) { + options.Volumes = query.LibpodVolumes + } else { + if query.Link { + utils.Error(w, http.StatusText(http.StatusBadRequest), http.StatusBadRequest, + utils.ErrLinkNotSupport) + return + } + options.Volumes = query.DockerVolumes } runtime := r.Context().Value("runtime").(*libpod.Runtime) @@ -57,12 +67,6 @@ func RemoveContainer(w http.ResponseWriter, r *http.Request) { // code. containerEngine := abi.ContainerEngine{Libpod: runtime} name := utils.GetName(r) - options := entities.RmOptions{ - All: query.All, - Force: query.Force, - Volumes: query.Volumes, - Ignore: query.Ignore, - } report, err := containerEngine.ContainerRm(r.Context(), []string{name}, options) if err != nil { if errors.Cause(err) == define.ErrNoSuchCtr { @@ -73,7 +77,7 @@ func RemoveContainer(w http.ResponseWriter, r *http.Request) { utils.InternalServerError(w, err) return } - if report[0].Err != nil { + if len(report) > 0 && report[0].Err != nil { utils.InternalServerError(w, report[0].Err) return } @@ -193,45 +197,48 @@ func KillContainer(w http.ResponseWriter, r *http.Request) { return } - sig, err := signal.ParseSignalNameOrNumber(query.Signal) - if err != nil { - utils.InternalServerError(w, err) - return - } + // Now use the ABI implementation to prevent us from having duplicate + // code. + containerEngine := abi.ContainerEngine{Libpod: runtime} name := utils.GetName(r) - con, err := runtime.LookupContainer(name) - if err != nil { - utils.ContainerNotFound(w, name, err) - return + options := entities.KillOptions{ + Signal: query.Signal, } - - state, err := con.State() + report, err := containerEngine.ContainerKill(r.Context(), []string{name}, options) if err != nil { - utils.InternalServerError(w, err) - return - } + if errors.Cause(err) == define.ErrCtrStateInvalid || + errors.Cause(err) == define.ErrCtrStopped { + utils.Error(w, fmt.Sprintf("Container %s is not running", name), http.StatusConflict, err) + return + } + if errors.Cause(err) == define.ErrNoSuchCtr { + utils.ContainerNotFound(w, name, err) + return + } - // If the Container is stopped already, send a 409 - if state == define.ContainerStateStopped || state == define.ContainerStateExited { - utils.Error(w, fmt.Sprintf("Container %s is not running", name), http.StatusConflict, errors.New(fmt.Sprintf("Cannot kill Container %s, it is not running", name))) + utils.InternalServerError(w, err) return } - signal := uint(sig) - - err = con.Kill(signal) - if err != nil { - utils.Error(w, "Something went wrong.", http.StatusInternalServerError, errors.Wrapf(err, "unable to kill Container %s", name)) + if len(report) > 0 && report[0].Err != nil { + utils.InternalServerError(w, report[0].Err) return } - // Docker waits for the container to stop if the signal is 0 or // SIGKILL. 
- if !utils.IsLibpodRequest(r) && (signal == 0 || syscall.Signal(signal) == syscall.SIGKILL) { - if _, err = con.Wait(); err != nil { - utils.Error(w, "Something went wrong.", http.StatusInternalServerError, errors.Wrapf(err, "failed to wait for Container %s", con.ID())) + if !utils.IsLibpodRequest(r) { + sig, err := signal.ParseSignalNameOrNumber(query.Signal) + if err != nil { + utils.InternalServerError(w, err) return } + if sig == 0 || syscall.Signal(sig) == syscall.SIGKILL { + var opts entities.WaitOptions + if _, err := containerEngine.ContainerWait(r.Context(), []string{name}, opts); err != nil { + utils.Error(w, "Something went wrong.", http.StatusInternalServerError, err) + return + } + } } // Success utils.WriteResponse(w, http.StatusNoContent, nil) @@ -242,6 +249,10 @@ func WaitContainer(w http.ResponseWriter, r *http.Request) { // /{version}/containers/(name)/wait exitCode, err := utils.WaitContainer(w, r) if err != nil { + if errors.Cause(err) == define.ErrNoSuchCtr { + logrus.Warnf("container not found %q: %v", utils.GetName(r), err) + return + } logrus.Warnf("failed to wait on container %q: %v", mux.Vars(r)["name"], err) return } @@ -264,6 +275,7 @@ func LibpodToContainer(l *libpod.Container, sz bool) (*handlers.Container, error sizeRootFs int64 sizeRW int64 state define.ContainerStatus + status string ) if state, err = l.State(); err != nil { @@ -274,6 +286,35 @@ func LibpodToContainer(l *libpod.Container, sz bool) (*handlers.Container, error stateStr = "created" } + if state == define.ContainerStateConfigured || state == define.ContainerStateCreated { + status = "Created" + } else if state == define.ContainerStateStopped || state == define.ContainerStateExited { + exitCode, _, err := l.ExitCode() + if err != nil { + return nil, err + } + finishedTime, err := l.FinishedTime() + if err != nil { + return nil, err + } + status = fmt.Sprintf("Exited (%d) %s ago", exitCode, units.HumanDuration(time.Since(finishedTime))) + } else if state == define.ContainerStateRunning || state == define.ContainerStatePaused { + startedTime, err := l.StartedTime() + if err != nil { + return nil, err + } + status = fmt.Sprintf("Up %s", units.HumanDuration(time.Since(startedTime))) + if state == define.ContainerStatePaused { + status += " (Paused)" + } + } else if state == define.ContainerStateRemoving { + status = "Removal In Progress" + } else if state == define.ContainerStateStopping { + status = "Stopping" + } else { + status = "Unknown" + } + if sz { if sizeRW, err = l.RWSize(); err != nil { return nil, err @@ -295,7 +336,7 @@ func LibpodToContainer(l *libpod.Container, sz bool) (*handlers.Container, error SizeRootFs: sizeRootFs, Labels: l.Labels(), State: stateStr, - Status: "", + Status: status, HostConfig: struct { NetworkMode string `json:",omitempty"` }{ diff --git a/pkg/api/handlers/compat/containers_restart.go b/pkg/api/handlers/compat/containers_restart.go index e8928596a..70edfcbb3 100644 --- a/pkg/api/handlers/compat/containers_restart.go +++ b/pkg/api/handlers/compat/containers_restart.go @@ -4,7 +4,10 @@ import ( "net/http" "github.com/containers/podman/v2/libpod" + "github.com/containers/podman/v2/libpod/define" "github.com/containers/podman/v2/pkg/api/handlers/utils" + "github.com/containers/podman/v2/pkg/domain/entities" + "github.com/containers/podman/v2/pkg/domain/infra/abi" "github.com/gorilla/schema" "github.com/pkg/errors" ) @@ -12,34 +15,49 @@ import ( func RestartContainer(w http.ResponseWriter, r *http.Request) { runtime := r.Context().Value("runtime").(*libpod.Runtime) 
decoder := r.Context().Value("decoder").(*schema.Decoder) + // Now use the ABI implementation to prevent us from having duplicate + // code. + containerEngine := abi.ContainerEngine{Libpod: runtime} + // /{version}/containers/(name)/restart query := struct { - Timeout int `schema:"t"` + All bool `schema:"all"` + DockerTimeout uint `schema:"t"` + LibpodTimeout uint `schema:"timeout"` }{ - // Override golang default values for types + // override any golang type defaults } if err := decoder.Decode(&query, r.URL.Query()); err != nil { - utils.BadRequest(w, "url", r.URL.String(), errors.Wrapf(err, "failed to parse parameters for %s", r.URL.String())) + utils.Error(w, http.StatusText(http.StatusBadRequest), http.StatusBadRequest, + errors.Wrapf(err, "failed to parse parameters for %s", r.URL.String())) return } name := utils.GetName(r) - con, err := runtime.LookupContainer(name) - if err != nil { - utils.ContainerNotFound(w, name, err) - return - } - timeout := con.StopTimeout() - if _, found := r.URL.Query()["t"]; found { - timeout = uint(query.Timeout) + options := entities.RestartOptions{ + All: query.All, + Timeout: &query.DockerTimeout, + } + if utils.IsLibpodRequest(r) { + options.Timeout = &query.LibpodTimeout } + report, err := containerEngine.ContainerRestart(r.Context(), []string{name}, options) + if err != nil { + if errors.Cause(err) == define.ErrNoSuchCtr { + utils.ContainerNotFound(w, name, err) + return + } - if err := con.RestartWithTimeout(r.Context(), timeout); err != nil { utils.InternalServerError(w, err) return } + if len(report) > 0 && report[0].Err != nil { + utils.InternalServerError(w, report[0].Err) + return + } + // Success utils.WriteResponse(w, http.StatusNoContent, nil) } diff --git a/pkg/api/handlers/compat/containers_stop.go b/pkg/api/handlers/compat/containers_stop.go index 8bc58cf59..000685aa0 100644 --- a/pkg/api/handlers/compat/containers_stop.go +++ b/pkg/api/handlers/compat/containers_stop.go @@ -6,6 +6,8 @@ import ( "github.com/containers/podman/v2/libpod" "github.com/containers/podman/v2/libpod/define" "github.com/containers/podman/v2/pkg/api/handlers/utils" + "github.com/containers/podman/v2/pkg/domain/entities" + "github.com/containers/podman/v2/pkg/domain/infra/abi" "github.com/gorilla/schema" "github.com/pkg/errors" ) @@ -13,10 +15,15 @@ import ( func StopContainer(w http.ResponseWriter, r *http.Request) { runtime := r.Context().Value("runtime").(*libpod.Runtime) decoder := r.Context().Value("decoder").(*schema.Decoder) + // Now use the ABI implementation to prevent us from having duplicate + // code. 
+ containerEngine := abi.ContainerEngine{Libpod: runtime} // /{version}/containers/(name)/stop query := struct { - Timeout int `schema:"t"` + Ignore bool `schema:"ignore"` + DockerTimeout uint `schema:"t"` + LibpodTimeout uint `schema:"timeout"` }{ // override any golang type defaults } @@ -27,31 +34,46 @@ func StopContainer(w http.ResponseWriter, r *http.Request) { } name := utils.GetName(r) + + options := entities.StopOptions{ + Ignore: query.Ignore, + } + if utils.IsLibpodRequest(r) { + if query.LibpodTimeout > 0 { + options.Timeout = &query.LibpodTimeout + } + } else { + if query.DockerTimeout > 0 { + options.Timeout = &query.DockerTimeout + } + } con, err := runtime.LookupContainer(name) if err != nil { utils.ContainerNotFound(w, name, err) return } - state, err := con.State() if err != nil { - utils.InternalServerError(w, errors.Wrapf(err, "unable to get state for Container %s", name)) + utils.InternalServerError(w, err) return } - // If the Container is stopped already, send a 304 if state == define.ContainerStateStopped || state == define.ContainerStateExited { utils.WriteResponse(w, http.StatusNotModified, nil) return } + report, err := containerEngine.ContainerStop(r.Context(), []string{name}, options) + if err != nil { + if errors.Cause(err) == define.ErrNoSuchCtr { + utils.ContainerNotFound(w, name, err) + return + } - var stopError error - if query.Timeout > 0 { - stopError = con.StopWithTimeout(uint(query.Timeout)) - } else { - stopError = con.Stop() + utils.InternalServerError(w, err) + return } - if stopError != nil { - utils.InternalServerError(w, errors.Wrapf(stopError, "failed to stop %s", name)) + + if len(report) > 0 && report[0].Err != nil { + utils.InternalServerError(w, report[0].Err) return } diff --git a/pkg/api/handlers/compat/images_push.go b/pkg/api/handlers/compat/images_push.go index 0f3da53e8..c352ac6cd 100644 --- a/pkg/api/handlers/compat/images_push.go +++ b/pkg/api/handlers/compat/images_push.go @@ -3,13 +3,15 @@ package compat import ( "context" "net/http" - "os" "strings" + "github.com/containers/image/v5/types" "github.com/containers/podman/v2/libpod" - "github.com/containers/podman/v2/libpod/image" "github.com/containers/podman/v2/pkg/api/handlers/utils" "github.com/containers/podman/v2/pkg/auth" + "github.com/containers/podman/v2/pkg/domain/entities" + "github.com/containers/podman/v2/pkg/domain/infra/abi" + "github.com/containers/storage" "github.com/gorilla/schema" "github.com/pkg/errors" ) @@ -18,11 +20,20 @@ import ( func PushImage(w http.ResponseWriter, r *http.Request) { decoder := r.Context().Value("decoder").(*schema.Decoder) runtime := r.Context().Value("runtime").(*libpod.Runtime) + // Now use the ABI implementation to prevent us from having duplicate + // code. 
+ imageEngine := abi.ImageEngine{Libpod: runtime} query := struct { - Tag string `schema:"tag"` + All bool `schema:"all"` + Compress bool `schema:"compress"` + Destination string `schema:"destination"` + Format string `schema:"format"` + TLSVerify bool `schema:"tlsVerify"` + Tag string `schema:"tag"` }{ // This is where you can override the golang default value for one of fields + TLSVerify: true, } if err := decoder.Decode(&query, r.URL.Query()); err != nil { @@ -43,39 +54,34 @@ func PushImage(w http.ResponseWriter, r *http.Request) { return } - newImage, err := runtime.ImageRuntime().NewFromLocal(imageName) - if err != nil { - utils.ImageNotFound(w, imageName, errors.Wrapf(err, "failed to find image %s", imageName)) - return - } - - authConf, authfile, key, err := auth.GetCredentials(r) + authconf, authfile, key, err := auth.GetCredentials(r) if err != nil { utils.Error(w, "Something went wrong.", http.StatusBadRequest, errors.Wrapf(err, "failed to parse %q header for %s", key, r.URL.String())) return } defer auth.RemoveAuthfile(authfile) - - dockerRegistryOptions := &image.DockerRegistryOptions{DockerRegistryCreds: authConf} - if sys := runtime.SystemContext(); sys != nil { - dockerRegistryOptions.DockerCertPath = sys.DockerCertPath - dockerRegistryOptions.RegistriesConfPath = sys.SystemRegistriesConfPath + var username, password string + if authconf != nil { + username = authconf.Username + password = authconf.Password + } + options := entities.ImagePushOptions{ + All: query.All, + Authfile: authfile, + Compress: query.Compress, + Format: query.Format, + Password: password, + Username: username, } + if _, found := r.URL.Query()["tlsVerify"]; found { + options.SkipTLSVerify = types.NewOptionalBool(!query.TLSVerify) + } + if err := imageEngine.Push(context.Background(), imageName, query.Destination, options); err != nil { + if errors.Cause(err) != storage.ErrImageUnknown { + utils.ImageNotFound(w, imageName, errors.Wrapf(err, "failed to find image %s", imageName)) + return + } - err = newImage.PushImageToHeuristicDestination( - context.Background(), - imageName, - "", // manifest type - authfile, - "", // digest file - "", // signature policy - os.Stderr, - false, // force compression - image.SigningOptions{}, - dockerRegistryOptions, - nil, // additional tags - ) - if err != nil { utils.Error(w, "Something went wrong.", http.StatusBadRequest, errors.Wrapf(err, "error pushing image %q", imageName)) return } diff --git a/pkg/api/handlers/libpod/containers.go b/pkg/api/handlers/libpod/containers.go index 6b07b1cc5..f6e348cef 100644 --- a/pkg/api/handlers/libpod/containers.go +++ b/pkg/api/handlers/libpod/containers.go @@ -12,7 +12,6 @@ import ( "github.com/containers/podman/v2/pkg/api/handlers/utils" "github.com/containers/podman/v2/pkg/domain/entities" "github.com/containers/podman/v2/pkg/domain/infra/abi" - "github.com/containers/podman/v2/pkg/ps" "github.com/gorilla/schema" "github.com/pkg/errors" "github.com/sirupsen/logrus" @@ -63,6 +62,7 @@ func ListContainers(w http.ResponseWriter, r *http.Request) { decoder := r.Context().Value("decoder").(*schema.Decoder) query := struct { All bool `schema:"all"` + External bool `schema:"external"` Filters map[string][]string `schema:"filters"` Last int `schema:"last"` // alias for limit Limit int `schema:"limit"` @@ -90,17 +90,22 @@ func ListContainers(w http.ResponseWriter, r *http.Request) { } runtime := r.Context().Value("runtime").(*libpod.Runtime) + // Now use the ABI implementation to prevent us from having duplicate + // code. 
+ containerEngine := abi.ContainerEngine{Libpod: runtime} opts := entities.ContainerListOptions{ All: query.All, + External: query.External, Filters: query.Filters, Last: limit, - Size: query.Size, - Sort: "", Namespace: query.Namespace, - Pod: true, - Sync: query.Sync, + // Always return Pod, should not be part of the API. + // https://github.com/containers/podman/pull/7223 + Pod: true, + Size: query.Size, + Sync: query.Sync, } - pss, err := ps.GetContainerLists(runtime, opts) + pss, err := containerEngine.ContainerList(r.Context(), opts) if err != nil { utils.InternalServerError(w, err) return @@ -143,6 +148,12 @@ func GetContainer(w http.ResponseWriter, r *http.Request) { func WaitContainer(w http.ResponseWriter, r *http.Request) { exitCode, err := utils.WaitContainer(w, r) if err != nil { + name := utils.GetName(r) + if errors.Cause(err) == define.ErrNoSuchCtr { + utils.ContainerNotFound(w, name, err) + return + } + logrus.Warnf("failed to wait on container %q: %v", name, err) return } utils.WriteResponse(w, http.StatusOK, strconv.Itoa(int(exitCode))) diff --git a/pkg/api/handlers/libpod/manifests.go b/pkg/api/handlers/libpod/manifests.go index 35221ecf1..ded51a31f 100644 --- a/pkg/api/handlers/libpod/manifests.go +++ b/pkg/api/handlers/libpod/manifests.go @@ -147,7 +147,6 @@ func ManifestPush(w http.ResponseWriter, r *http.Request) { query := struct { All bool `schema:"all"` Destination string `schema:"destination"` - Format string `schema:"format"` TLSVerify bool `schema:"tlsVerify"` }{ // Add defaults here once needed. @@ -163,24 +162,21 @@ func ManifestPush(w http.ResponseWriter, r *http.Request) { } source := utils.GetName(r) - authConf, authfile, key, err := auth.GetCredentials(r) + authconf, authfile, key, err := auth.GetCredentials(r) if err != nil { utils.Error(w, "failed to retrieve repository credentials", http.StatusBadRequest, errors.Wrapf(err, "failed to parse %q header for %s", key, r.URL.String())) return } defer auth.RemoveAuthfile(authfile) var username, password string - if authConf != nil { - username = authConf.Username - password = authConf.Password - + if authconf != nil { + username = authconf.Username + password = authconf.Password } - options := entities.ImagePushOptions{ Authfile: authfile, Username: username, Password: password, - Format: query.Format, All: query.All, } if sys := runtime.SystemContext(); sys != nil { diff --git a/pkg/api/handlers/utils/containers.go b/pkg/api/handlers/utils/containers.go index 1439a3a75..fac237f87 100644 --- a/pkg/api/handlers/utils/containers.go +++ b/pkg/api/handlers/utils/containers.go @@ -6,6 +6,8 @@ import ( "github.com/containers/podman/v2/libpod" "github.com/containers/podman/v2/libpod/define" + "github.com/containers/podman/v2/pkg/domain/entities" + "github.com/containers/podman/v2/pkg/domain/infra/abi" "github.com/gorilla/schema" "github.com/pkg/errors" ) @@ -16,10 +18,13 @@ func WaitContainer(w http.ResponseWriter, r *http.Request) (int32, error) { interval time.Duration ) runtime := r.Context().Value("runtime").(*libpod.Runtime) + // Now use the ABI implementation to prevent us from having duplicate + // code. 
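A minimal sketch of listing containers through the shared ABI engine with the new external flag, assuming the entities and abi packages used in the hunks above; listExternal is a hypothetical helper, not part of the patch.

```go
package libpod

import (
	"context"

	"github.com/containers/podman/v2/libpod"
	"github.com/containers/podman/v2/pkg/domain/entities"
	"github.com/containers/podman/v2/pkg/domain/infra/abi"
)

// listExternal returns all containers, including containers that exist in
// storage but are not managed by Podman (for example Buildah or CRI-O ones).
func listExternal(ctx context.Context, runtime *libpod.Runtime) ([]entities.ListContainer, error) {
	containerEngine := abi.ContainerEngine{Libpod: runtime}
	opts := entities.ContainerListOptions{
		All:      true,
		External: true,
		// Pod is always set server-side; see containers/podman#7223.
		Pod: true,
	}
	return containerEngine.ContainerList(ctx, opts)
}
```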
+ containerEngine := abi.ContainerEngine{Libpod: runtime} decoder := r.Context().Value("decoder").(*schema.Decoder) query := struct { - Interval string `schema:"interval"` - Condition string `schema:"condition"` + Interval string `schema:"interval"` + Condition define.ContainerStatus `schema:"condition"` }{ // Override golang default values for types } @@ -27,6 +32,10 @@ func WaitContainer(w http.ResponseWriter, r *http.Request) (int32, error) { Error(w, "Something went wrong.", http.StatusBadRequest, errors.Wrapf(err, "failed to parse parameters for %s", r.URL.String())) return 0, err } + options := entities.WaitOptions{ + Condition: define.ContainerStateStopped, + } + name := GetName(r) if _, found := r.URL.Query()["interval"]; found { interval, err = time.ParseDuration(query.Interval) if err != nil { @@ -40,19 +49,19 @@ func WaitContainer(w http.ResponseWriter, r *http.Request) (int32, error) { return 0, err } } - condition := define.ContainerStateStopped + options.Interval = interval + if _, found := r.URL.Query()["condition"]; found { - condition, err = define.StringToContainerStatus(query.Condition) - if err != nil { - InternalServerError(w, err) - return 0, err - } + options.Condition = query.Condition } - name := GetName(r) - con, err := runtime.LookupContainer(name) + + report, err := containerEngine.ContainerWait(r.Context(), []string{name}, options) if err != nil { - ContainerNotFound(w, name, err) return 0, err } - return con.WaitForConditionWithInterval(interval, condition) + if len(report) == 0 { + InternalServerError(w, errors.New("No reports returned")) + return 0, err + } + return report[0].ExitCode, report[0].Error } diff --git a/pkg/api/server/register_containers.go b/pkg/api/server/register_containers.go index 74a04b2e6..ff1781d1e 100644 --- a/pkg/api/server/register_containers.go +++ b/pkg/api/server/register_containers.go @@ -48,6 +48,11 @@ func (s *APIServer) registerContainersHandlers(r *mux.Router) error { // default: false // description: Return all containers. By default, only running containers are shown // - in: query + // name: external + // type: boolean + // default: false + // description: Return containers in storage not controlled by Podman + // - in: query // name: limit // description: Return this number of most recently created containers, including non-running ones. // type: integer @@ -194,6 +199,11 @@ func (s *APIServer) registerContainersHandlers(r *mux.Router) error { // required: true // description: the name or ID of the container // - in: query + // name: all + // type: boolean + // default: false + // description: Send kill signal to all containers + // - in: query // name: signal // type: string // default: TERM @@ -481,6 +491,11 @@ func (s *APIServer) registerContainersHandlers(r *mux.Router) error { // - paused // - running // - stopped + // - in: query + // name: interval + // type: string + // default: "250ms" + // description: Time Interval to wait before polling for completion. 
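A minimal client-side sketch of the wait endpoint with the new interval parameter, assuming the v2 bindings connection helper and the WaitOptions setters shown later in this patch; the socket URI and container name are placeholders.

```go
package main

import (
	"context"
	"fmt"

	"github.com/containers/podman/v2/libpod/define"
	"github.com/containers/podman/v2/pkg/bindings"
	"github.com/containers/podman/v2/pkg/bindings/containers"
)

func main() {
	ctx, err := bindings.NewConnection(context.Background(), "unix:///run/podman/podman.sock")
	if err != nil {
		panic(err)
	}
	// Poll every 250ms (the documented default) until the container stops.
	options := new(containers.WaitOptions).
		WithCondition(define.ContainerStateStopped).
		WithInterval("250ms")
	exitCode, err := containers.Wait(ctx, "mycontainer", options)
	if err != nil {
		panic(err)
	}
	fmt.Println("exit code:", exitCode)
}
```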
// produces: // - application/json // responses: @@ -1214,9 +1229,20 @@ func (s *APIServer) registerContainersHandlers(r *mux.Router) error { // required: true // description: the name or ID of the container // - in: query - // name: t + // name: all + // type: boolean + // default: false + // description: Stop all containers + // - in: query + // name: timeout // type: integer + // default: 10 // description: number of seconds to wait before killing container + // - in: query + // name: Ignore + // type: boolean + // default: false + // description: do not return error if container is already stopped // produces: // - application/json // responses: diff --git a/pkg/api/server/register_images.go b/pkg/api/server/register_images.go index d76f811e9..2ce0829b4 100644 --- a/pkg/api/server/register_images.go +++ b/pkg/api/server/register_images.go @@ -235,6 +235,18 @@ func (s *APIServer) registerImagesHandlers(r *mux.Router) error { // name: tag // type: string // description: The tag to associate with the image on the registry. + // - in: query + // name: all + // type: boolean + // description: All indicates whether to push all images related to the image list + // - in: query + // name: compress + // type: boolean + // description: use compression on image + // - in: query + // name: destination + // type: string + // description: destination name for the image being pushed // - in: header // name: X-Registry-Auth // type: string diff --git a/pkg/bindings/containers/containers.go b/pkg/bindings/containers/containers.go index 40fcfbded..8e644b712 100644 --- a/pkg/bindings/containers/containers.go +++ b/pkg/bindings/containers/containers.go @@ -5,7 +5,6 @@ import ( "io" "net/http" "net/url" - "strconv" "strings" "github.com/containers/podman/v2/libpod/define" @@ -83,18 +82,9 @@ func Remove(ctx context.Context, nameOrID string, options *RemoveOptions) error if err != nil { return err } - params := url.Values{} - if v := options.GetVolumes(); options.Changed("Volumes") { - params.Set("v", strconv.FormatBool(v)) - } - if all := options.GetAll(); options.Changed("All") { - params.Set("all", strconv.FormatBool(all)) - } - if force := options.GetForce(); options.Changed("Force") { - params.Set("force", strconv.FormatBool(force)) - } - if ignore := options.GetIgnore(); options.Changed("Ignore") { - params.Set("ignore", strconv.FormatBool(ignore)) + params, err := options.ToParams() + if err != nil { + return err } response, err := conn.DoRequest(nil, http.MethodDelete, "/containers/%s", params, nil, nameOrID) if err != nil { @@ -130,7 +120,7 @@ func Inspect(ctx context.Context, nameOrID string, options *InspectOptions) (*de // Kill sends a given signal to a given container. The signal should be the string // representation of a signal like 'SIGKILL'. 
The nameOrID can be a container name // or a partial/full ID -func Kill(ctx context.Context, nameOrID string, sig string, options *KillOptions) error { +func Kill(ctx context.Context, nameOrID string, options *KillOptions) error { if options == nil { options = new(KillOptions) } @@ -142,7 +132,6 @@ func Kill(ctx context.Context, nameOrID string, sig string, options *KillOptions if err != nil { return err } - params.Set("signal", sig) response, err := conn.DoRequest(nil, http.MethodPost, "/containers/%s/kill", params, nil, nameOrID) if err != nil { return err @@ -180,9 +169,9 @@ func Restart(ctx context.Context, nameOrID string, options *RestartOptions) erro if err != nil { return err } - params := url.Values{} - if options.Changed("Timeout") { - params.Set("t", strconv.Itoa(options.GetTimeout())) + params, err := options.ToParams() + if err != nil { + return err } response, err := conn.DoRequest(nil, http.MethodPost, "/containers/%s/restart", params, nil, nameOrID) if err != nil { @@ -335,9 +324,9 @@ func Wait(ctx context.Context, nameOrID string, options *WaitOptions) (int32, er if err != nil { return exitCode, err } - params := url.Values{} - if options.Changed("Condition") { - params.Set("condition", options.GetCondition().String()) + params, err := options.ToParams() + if err != nil { + return exitCode, err } response, err := conn.DoRequest(nil, http.MethodPost, "/containers/%s/wait", params, nil, nameOrID) if err != nil { diff --git a/pkg/bindings/containers/types.go b/pkg/bindings/containers/types.go index 24604fa83..771cde72c 100644 --- a/pkg/bindings/containers/types.go +++ b/pkg/bindings/containers/types.go @@ -106,6 +106,7 @@ type MountedContainerPathsOptions struct{} // ListOptions are optional options for listing containers type ListOptions struct { All *bool + External *bool Filters map[string][]string Last *int Namespace *bool @@ -122,7 +123,6 @@ type PruneOptions struct { //go:generate go run ../generator/generator.go RemoveOptions // RemoveOptions are optional options for removing containers type RemoveOptions struct { - All *bool Ignore *bool Force *bool Volumes *bool @@ -137,6 +137,7 @@ type InspectOptions struct { //go:generate go run ../generator/generator.go KillOptions // KillOptions are optional options for killing containers type KillOptions struct { + Signal *string } //go:generate go run ../generator/generator.go PauseOptions @@ -176,11 +177,13 @@ type UnpauseOptions struct{} // WaitOptions are optional options for waiting on containers type WaitOptions struct { Condition *define.ContainerStatus + Interval *string } //go:generate go run ../generator/generator.go StopOptions // StopOptions are optional options for stopping containers type StopOptions struct { + Ignore *bool Timeout *uint } diff --git a/pkg/bindings/containers/types_kill_options.go b/pkg/bindings/containers/types_kill_options.go index dd84f0d9f..c5d5a3c6a 100644 --- a/pkg/bindings/containers/types_kill_options.go +++ b/pkg/bindings/containers/types_kill_options.go @@ -86,3 +86,19 @@ func (o *KillOptions) ToParams() (url.Values, error) { } return params, nil } + +// WithSignal +func (o *KillOptions) WithSignal(value string) *KillOptions { + v := &value + o.Signal = v + return o +} + +// GetSignal +func (o *KillOptions) GetSignal() string { + var signal string + if o.Signal == nil { + return signal + } + return *o.Signal +} diff --git a/pkg/bindings/containers/types_list_options.go b/pkg/bindings/containers/types_list_options.go index 43326fa59..c363dcd32 100644 --- 
a/pkg/bindings/containers/types_list_options.go +++ b/pkg/bindings/containers/types_list_options.go @@ -103,6 +103,22 @@ func (o *ListOptions) GetAll() bool { return *o.All } +// WithExternal +func (o *ListOptions) WithExternal(value bool) *ListOptions { + v := &value + o.External = v + return o +} + +// GetExternal +func (o *ListOptions) GetExternal() bool { + var external bool + if o.External == nil { + return external + } + return *o.External +} + // WithFilters func (o *ListOptions) WithFilters(value map[string][]string) *ListOptions { v := value diff --git a/pkg/bindings/containers/types_remove_options.go b/pkg/bindings/containers/types_remove_options.go index 3ef32fa03..ffe1488c1 100644 --- a/pkg/bindings/containers/types_remove_options.go +++ b/pkg/bindings/containers/types_remove_options.go @@ -87,22 +87,6 @@ func (o *RemoveOptions) ToParams() (url.Values, error) { return params, nil } -// WithAll -func (o *RemoveOptions) WithAll(value bool) *RemoveOptions { - v := &value - o.All = v - return o -} - -// GetAll -func (o *RemoveOptions) GetAll() bool { - var all bool - if o.All == nil { - return all - } - return *o.All -} - // WithIgnore func (o *RemoveOptions) WithIgnore(value bool) *RemoveOptions { v := &value diff --git a/pkg/bindings/containers/types_stop_options.go b/pkg/bindings/containers/types_stop_options.go index db692dbf0..940ec5832 100644 --- a/pkg/bindings/containers/types_stop_options.go +++ b/pkg/bindings/containers/types_stop_options.go @@ -87,6 +87,22 @@ func (o *StopOptions) ToParams() (url.Values, error) { return params, nil } +// WithIgnore +func (o *StopOptions) WithIgnore(value bool) *StopOptions { + v := &value + o.Ignore = v + return o +} + +// GetIgnore +func (o *StopOptions) GetIgnore() bool { + var ignore bool + if o.Ignore == nil { + return ignore + } + return *o.Ignore +} + // WithTimeout func (o *StopOptions) WithTimeout(value uint) *StopOptions { v := &value diff --git a/pkg/bindings/containers/types_wait_options.go b/pkg/bindings/containers/types_wait_options.go index 470d67611..2f5aa983e 100644 --- a/pkg/bindings/containers/types_wait_options.go +++ b/pkg/bindings/containers/types_wait_options.go @@ -103,3 +103,19 @@ func (o *WaitOptions) GetCondition() define.ContainerStatus { } return *o.Condition } + +// WithInterval +func (o *WaitOptions) WithInterval(value string) *WaitOptions { + v := &value + o.Interval = v + return o +} + +// GetInterval +func (o *WaitOptions) GetInterval() string { + var interval string + if o.Interval == nil { + return interval + } + return *o.Interval +} diff --git a/pkg/bindings/images/types.go b/pkg/bindings/images/types.go index 0248f2fa6..7bf70c82b 100644 --- a/pkg/bindings/images/types.go +++ b/pkg/bindings/images/types.go @@ -2,7 +2,6 @@ package images import ( "github.com/containers/buildah/imagebuildah" - "github.com/containers/common/pkg/config" ) //go:generate go run ../generator/generator.go RemoveOptions @@ -104,37 +103,16 @@ type PushOptions struct { // Authfile is the path to the authentication file. Ignored for remote // calls. Authfile *string - // CertDir is the path to certificate directories. Ignored for remote - // calls. - CertDir *string - // Compress tarball image layers when pushing to a directory using the 'dir' - // transport. Default is same compression type as source. Ignored for remote - // calls. + // Compress tarball image layers when pushing to a directory using the 'dir' transport. Compress *bool - // Username for authenticating against the registry. 
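A minimal sketch of the new Ignore knob on the stop bindings, assuming the generated StopOptions setters above and the existing containers.Stop call; the socket URI and container name are placeholders.

```go
package main

import (
	"context"
	"log"

	"github.com/containers/podman/v2/pkg/bindings"
	"github.com/containers/podman/v2/pkg/bindings/containers"
)

func main() {
	ctx, err := bindings.NewConnection(context.Background(), "unix:///run/podman/podman.sock")
	if err != nil {
		log.Fatal(err)
	}
	// Ignore suppresses the "no such container" error; Timeout maps to the
	// REST "timeout" parameter handled by the server-side StopContainer.
	options := new(containers.StopOptions).WithIgnore(true).WithTimeout(10)
	if err := containers.Stop(ctx, "mycontainer", options); err != nil {
		log.Fatal(err)
	}
}
```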
- Username *string + // Manifest type of the pushed image + Format *string // Password for authenticating against the registry. Password *string - // DigestFile, after copying the image, write the digest of the resulting - // image to the file. Ignored for remote calls. - DigestFile *string - // Format is the Manifest type (oci, v2s1, or v2s2) to use when pushing an - // image using the 'dir' transport. Default is manifest type of source. - // Ignored for remote calls. - Format *string - // Quiet can be specified to suppress pull progress when pulling. Ignored - // for remote calls. - Quiet *bool - // RemoveSignatures, discard any pre-existing signatures in the image. - // Ignored for remote calls. - RemoveSignatures *bool - // SignaturePolicy to use when pulling. Ignored for remote calls. - SignaturePolicy *string - // SignBy adds a signature at the destination using the specified key. - // Ignored for remote calls. - SignBy *string // SkipTLSVerify to skip HTTPS and certificate verification. SkipTLSVerify *bool + // Username for authenticating against the registry. + Username *string } //go:generate go run ../generator/generator.go SearchOptions @@ -161,32 +139,25 @@ type PullOptions struct { // AllTags can be specified to pull all tags of an image. Note // that this only works if the image does not include a tag. AllTags *bool + // Arch will overwrite the local architecture for image pulls. + Arch *string // Authfile is the path to the authentication file. Ignored for remote // calls. Authfile *string - // CertDir is the path to certificate directories. Ignored for remote - // calls. - CertDir *string - // Username for authenticating against the registry. - Username *string - // Password for authenticating against the registry. - Password *string - // Arch will overwrite the local architecture for image pulls. - Arch *string // OS will overwrite the local operating system (OS) for image // pulls. OS *string - // Variant will overwrite the local variant for image pulls. - Variant *string + // Password for authenticating against the registry. + Password *string // Quiet can be specified to suppress pull progress when pulling. Ignored // for remote calls. Quiet *bool - // SignaturePolicy to use when pulling. Ignored for remote calls. - SignaturePolicy *string // SkipTLSVerify to skip HTTPS and certificate verification. SkipTLSVerify *bool - // PullPolicy whether to pull new image - PullPolicy *config.PullPolicy + // Username for authenticating against the registry. + Username *string + // Variant will overwrite the local variant for image pulls. 
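A minimal sketch of the trimmed-down pull options after this change: CertDir, SignaturePolicy, and PullPolicy are no longer exposed over the remote API, while arch/os/variant, credentials, and TLS verification remain. The socket URI and image reference are placeholders; the return value is assumed to be the list of pulled image IDs as used by the tunnel engine later in the patch.

```go
package main

import (
	"context"
	"fmt"

	"github.com/containers/podman/v2/pkg/bindings"
	"github.com/containers/podman/v2/pkg/bindings/images"
)

func main() {
	ctx, err := bindings.NewConnection(context.Background(), "unix:///run/podman/podman.sock")
	if err != nil {
		panic(err)
	}
	options := new(images.PullOptions).
		WithArch("arm64").
		WithOS("linux").
		WithQuiet(true).
		WithSkipTLSVerify(false)
	ids, err := images.Pull(ctx, "docker.io/library/alpine:latest", options)
	if err != nil {
		panic(err)
	}
	fmt.Println("pulled:", ids)
}
```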
+ Variant *string } //BuildOptions are optional options for building images diff --git a/pkg/bindings/images/types_pull_options.go b/pkg/bindings/images/types_pull_options.go index 2bdf2b66e..5452560fb 100644 --- a/pkg/bindings/images/types_pull_options.go +++ b/pkg/bindings/images/types_pull_options.go @@ -6,7 +6,6 @@ import ( "strconv" "strings" - "github.com/containers/common/pkg/config" jsoniter "github.com/json-iterator/go" "github.com/pkg/errors" ) @@ -104,70 +103,6 @@ func (o *PullOptions) GetAllTags() bool { return *o.AllTags } -// WithAuthfile -func (o *PullOptions) WithAuthfile(value string) *PullOptions { - v := &value - o.Authfile = v - return o -} - -// GetAuthfile -func (o *PullOptions) GetAuthfile() string { - var authfile string - if o.Authfile == nil { - return authfile - } - return *o.Authfile -} - -// WithCertDir -func (o *PullOptions) WithCertDir(value string) *PullOptions { - v := &value - o.CertDir = v - return o -} - -// GetCertDir -func (o *PullOptions) GetCertDir() string { - var certDir string - if o.CertDir == nil { - return certDir - } - return *o.CertDir -} - -// WithUsername -func (o *PullOptions) WithUsername(value string) *PullOptions { - v := &value - o.Username = v - return o -} - -// GetUsername -func (o *PullOptions) GetUsername() string { - var username string - if o.Username == nil { - return username - } - return *o.Username -} - -// WithPassword -func (o *PullOptions) WithPassword(value string) *PullOptions { - v := &value - o.Password = v - return o -} - -// GetPassword -func (o *PullOptions) GetPassword() string { - var password string - if o.Password == nil { - return password - } - return *o.Password -} - // WithArch func (o *PullOptions) WithArch(value string) *PullOptions { v := &value @@ -184,6 +119,22 @@ func (o *PullOptions) GetArch() string { return *o.Arch } +// WithAuthfile +func (o *PullOptions) WithAuthfile(value string) *PullOptions { + v := &value + o.Authfile = v + return o +} + +// GetAuthfile +func (o *PullOptions) GetAuthfile() string { + var authfile string + if o.Authfile == nil { + return authfile + } + return *o.Authfile +} + // WithOS func (o *PullOptions) WithOS(value string) *PullOptions { v := &value @@ -200,20 +151,20 @@ func (o *PullOptions) GetOS() string { return *o.OS } -// WithVariant -func (o *PullOptions) WithVariant(value string) *PullOptions { +// WithPassword +func (o *PullOptions) WithPassword(value string) *PullOptions { v := &value - o.Variant = v + o.Password = v return o } -// GetVariant -func (o *PullOptions) GetVariant() string { - var variant string - if o.Variant == nil { - return variant +// GetPassword +func (o *PullOptions) GetPassword() string { + var password string + if o.Password == nil { + return password } - return *o.Variant + return *o.Password } // WithQuiet @@ -232,22 +183,6 @@ func (o *PullOptions) GetQuiet() bool { return *o.Quiet } -// WithSignaturePolicy -func (o *PullOptions) WithSignaturePolicy(value string) *PullOptions { - v := &value - o.SignaturePolicy = v - return o -} - -// GetSignaturePolicy -func (o *PullOptions) GetSignaturePolicy() string { - var signaturePolicy string - if o.SignaturePolicy == nil { - return signaturePolicy - } - return *o.SignaturePolicy -} - // WithSkipTLSVerify func (o *PullOptions) WithSkipTLSVerify(value bool) *PullOptions { v := &value @@ -264,18 +199,34 @@ func (o *PullOptions) GetSkipTLSVerify() bool { return *o.SkipTLSVerify } -// WithPullPolicy -func (o *PullOptions) WithPullPolicy(value config.PullPolicy) *PullOptions { +// WithUsername +func (o 
*PullOptions) WithUsername(value string) *PullOptions { + v := &value + o.Username = v + return o +} + +// GetUsername +func (o *PullOptions) GetUsername() string { + var username string + if o.Username == nil { + return username + } + return *o.Username +} + +// WithVariant +func (o *PullOptions) WithVariant(value string) *PullOptions { v := &value - o.PullPolicy = v + o.Variant = v return o } -// GetPullPolicy -func (o *PullOptions) GetPullPolicy() config.PullPolicy { - var pullPolicy config.PullPolicy - if o.PullPolicy == nil { - return pullPolicy +// GetVariant +func (o *PullOptions) GetVariant() string { + var variant string + if o.Variant == nil { + return variant } - return *o.PullPolicy + return *o.Variant } diff --git a/pkg/bindings/images/types_push_options.go b/pkg/bindings/images/types_push_options.go index 0c12ce4ac..b7d8a6f2d 100644 --- a/pkg/bindings/images/types_push_options.go +++ b/pkg/bindings/images/types_push_options.go @@ -119,22 +119,6 @@ func (o *PushOptions) GetAuthfile() string { return *o.Authfile } -// WithCertDir -func (o *PushOptions) WithCertDir(value string) *PushOptions { - v := &value - o.CertDir = v - return o -} - -// GetCertDir -func (o *PushOptions) GetCertDir() string { - var certDir string - if o.CertDir == nil { - return certDir - } - return *o.CertDir -} - // WithCompress func (o *PushOptions) WithCompress(value bool) *PushOptions { v := &value @@ -151,54 +135,6 @@ func (o *PushOptions) GetCompress() bool { return *o.Compress } -// WithUsername -func (o *PushOptions) WithUsername(value string) *PushOptions { - v := &value - o.Username = v - return o -} - -// GetUsername -func (o *PushOptions) GetUsername() string { - var username string - if o.Username == nil { - return username - } - return *o.Username -} - -// WithPassword -func (o *PushOptions) WithPassword(value string) *PushOptions { - v := &value - o.Password = v - return o -} - -// GetPassword -func (o *PushOptions) GetPassword() string { - var password string - if o.Password == nil { - return password - } - return *o.Password -} - -// WithDigestFile -func (o *PushOptions) WithDigestFile(value string) *PushOptions { - v := &value - o.DigestFile = v - return o -} - -// GetDigestFile -func (o *PushOptions) GetDigestFile() string { - var digestFile string - if o.DigestFile == nil { - return digestFile - } - return *o.DigestFile -} - // WithFormat func (o *PushOptions) WithFormat(value string) *PushOptions { v := &value @@ -215,68 +151,20 @@ func (o *PushOptions) GetFormat() string { return *o.Format } -// WithQuiet -func (o *PushOptions) WithQuiet(value bool) *PushOptions { - v := &value - o.Quiet = v - return o -} - -// GetQuiet -func (o *PushOptions) GetQuiet() bool { - var quiet bool - if o.Quiet == nil { - return quiet - } - return *o.Quiet -} - -// WithRemoveSignatures -func (o *PushOptions) WithRemoveSignatures(value bool) *PushOptions { - v := &value - o.RemoveSignatures = v - return o -} - -// GetRemoveSignatures -func (o *PushOptions) GetRemoveSignatures() bool { - var removeSignatures bool - if o.RemoveSignatures == nil { - return removeSignatures - } - return *o.RemoveSignatures -} - -// WithSignaturePolicy -func (o *PushOptions) WithSignaturePolicy(value string) *PushOptions { - v := &value - o.SignaturePolicy = v - return o -} - -// GetSignaturePolicy -func (o *PushOptions) GetSignaturePolicy() string { - var signaturePolicy string - if o.SignaturePolicy == nil { - return signaturePolicy - } - return *o.SignaturePolicy -} - -// WithSignBy -func (o *PushOptions) WithSignBy(value 
string) *PushOptions { +// WithPassword +func (o *PushOptions) WithPassword(value string) *PushOptions { v := &value - o.SignBy = v + o.Password = v return o } -// GetSignBy -func (o *PushOptions) GetSignBy() string { - var signBy string - if o.SignBy == nil { - return signBy +// GetPassword +func (o *PushOptions) GetPassword() string { + var password string + if o.Password == nil { + return password } - return *o.SignBy + return *o.Password } // WithSkipTLSVerify @@ -294,3 +182,19 @@ func (o *PushOptions) GetSkipTLSVerify() bool { } return *o.SkipTLSVerify } + +// WithUsername +func (o *PushOptions) WithUsername(value string) *PushOptions { + v := &value + o.Username = v + return o +} + +// GetUsername +func (o *PushOptions) GetUsername() string { + var username string + if o.Username == nil { + return username + } + return *o.Username +} diff --git a/pkg/bindings/manifests/manifests.go b/pkg/bindings/manifests/manifests.go index fec9832a0..4634dd442 100644 --- a/pkg/bindings/manifests/manifests.go +++ b/pkg/bindings/manifests/manifests.go @@ -153,7 +153,6 @@ func Push(ctx context.Context, name, destination string, options *images.PushOpt } params.Set("image", name) params.Set("destination", destination) - params.Set("format", *options.Format) _, err = conn.DoRequest(nil, http.MethodPost, "/manifests/%s/push", params, nil, name) if err != nil { return "", err diff --git a/pkg/bindings/test/containers_test.go b/pkg/bindings/test/containers_test.go index 3d7526cb8..9b9f98047 100644 --- a/pkg/bindings/test/containers_test.go +++ b/pkg/bindings/test/containers_test.go @@ -443,7 +443,7 @@ var _ = Describe("Podman containers ", func() { It("podman kill bogus container", func() { // Killing bogus container should return 404 - err := containers.Kill(bt.conn, "foobar", "SIGTERM", nil) + err := containers.Kill(bt.conn, "foobar", new(containers.KillOptions).WithSignal("SIGTERM")) Expect(err).ToNot(BeNil()) code, _ := bindings.CheckResponseCode(err) Expect(code).To(BeNumerically("==", http.StatusNotFound)) @@ -454,7 +454,7 @@ var _ = Describe("Podman containers ", func() { var name = "top" _, err := bt.RunTopContainer(&name, bindings.PFalse, nil) Expect(err).To(BeNil()) - err = containers.Kill(bt.conn, name, "SIGINT", nil) + err = containers.Kill(bt.conn, name, new(containers.KillOptions).WithSignal("SIGINT")) Expect(err).To(BeNil()) _, err = containers.Exists(bt.conn, name, nil) Expect(err).To(BeNil()) @@ -465,7 +465,7 @@ var _ = Describe("Podman containers ", func() { var name = "top" cid, err := bt.RunTopContainer(&name, bindings.PFalse, nil) Expect(err).To(BeNil()) - err = containers.Kill(bt.conn, cid, "SIGTERM", nil) + err = containers.Kill(bt.conn, cid, new(containers.KillOptions).WithSignal("SIGTERM")) Expect(err).To(BeNil()) _, err = containers.Exists(bt.conn, cid, nil) Expect(err).To(BeNil()) @@ -476,7 +476,7 @@ var _ = Describe("Podman containers ", func() { var name = "top" cid, err := bt.RunTopContainer(&name, bindings.PFalse, nil) Expect(err).To(BeNil()) - err = containers.Kill(bt.conn, cid, "SIGKILL", nil) + err = containers.Kill(bt.conn, cid, new(containers.KillOptions).WithSignal("SIGKILL")) Expect(err).To(BeNil()) }) @@ -485,7 +485,7 @@ var _ = Describe("Podman containers ", func() { var name = "top" cid, err := bt.RunTopContainer(&name, bindings.PFalse, nil) Expect(err).To(BeNil()) - err = containers.Kill(bt.conn, cid, "foobar", nil) + err = containers.Kill(bt.conn, cid, new(containers.KillOptions).WithSignal("foobar")) Expect(err).ToNot(BeNil()) code, _ := 
bindings.CheckResponseCode(err) Expect(code).To(BeNumerically("==", http.StatusInternalServerError)) @@ -501,7 +501,7 @@ var _ = Describe("Podman containers ", func() { Expect(err).To(BeNil()) containerLatestList, err := containers.List(bt.conn, new(containers.ListOptions).WithLast(1)) Expect(err).To(BeNil()) - err = containers.Kill(bt.conn, containerLatestList[0].Names[0], "SIGTERM", nil) + err = containers.Kill(bt.conn, containerLatestList[0].Names[0], new(containers.KillOptions).WithSignal("SIGTERM")) Expect(err).To(BeNil()) }) diff --git a/pkg/cgroups/cgroups.go b/pkg/cgroups/cgroups.go index c200dd01a..285fd093a 100644 --- a/pkg/cgroups/cgroups.go +++ b/pkg/cgroups/cgroups.go @@ -24,6 +24,7 @@ var ( ErrCgroupDeleted = errors.New("cgroup deleted") // ErrCgroupV1Rootless means the cgroup v1 were attempted to be used in rootless environment ErrCgroupV1Rootless = errors.New("no support for CGroups V1 in rootless environments") + ErrStatCgroup = errors.New("no cgroup available for gathering user statistics") ) // CgroupControl controls a cgroup hierarchy @@ -525,10 +526,19 @@ func (c *CgroupControl) AddPid(pid int) error { // Stat returns usage statistics for the cgroup func (c *CgroupControl) Stat() (*Metrics, error) { m := Metrics{} + found := false for _, h := range handlers { if err := h.Stat(c, &m); err != nil { - return nil, err + if !os.IsNotExist(errors.Cause(err)) { + return nil, err + } + logrus.Warningf("Failed to retrieve cgroup stats: %v", err) + continue } + found = true + } + if !found { + return nil, ErrStatCgroup } return &m, nil } diff --git a/pkg/cgroups/cgroups_test.go b/pkg/cgroups/cgroups_test.go new file mode 100644 index 000000000..54315f7be --- /dev/null +++ b/pkg/cgroups/cgroups_test.go @@ -0,0 +1,32 @@ +package cgroups + +import ( + "testing" + + "github.com/containers/podman/v2/pkg/rootless" + spec "github.com/opencontainers/runtime-spec/specs-go" +) + +func TestCreated(t *testing.T) { + // tests only works in rootless mode + if rootless.IsRootless() { + return + } + + var resources spec.LinuxResources + cgr, err := New("machine.slice", &resources) + if err != nil { + t.Error(err) + } + if err := cgr.Delete(); err != nil { + t.Error(err) + } + + cgr, err = NewSystemd("machine.slice") + if err != nil { + t.Error(err) + } + if err := cgr.Delete(); err != nil { + t.Error(err) + } +} diff --git a/pkg/domain/entities/containers.go b/pkg/domain/entities/containers.go index 4c1bd6a7d..63be5578f 100644 --- a/pkg/domain/entities/containers.go +++ b/pkg/domain/entities/containers.go @@ -81,11 +81,10 @@ type PauseUnpauseReport struct { } type StopOptions struct { - All bool - CIDFiles []string - Ignore bool - Latest bool - Timeout *uint + All bool + Ignore bool + Latest bool + Timeout *uint } type StopReport struct { @@ -104,10 +103,9 @@ type TopOptions struct { } type KillOptions struct { - All bool - Latest bool - Signal string - CIDFiles []string + All bool + Latest bool + Signal string } type KillReport struct { @@ -297,8 +295,8 @@ type ContainerListOptions struct { Pod bool Quiet bool Size bool + External bool Sort string - Storage bool Sync bool Watch uint } diff --git a/pkg/domain/infra/abi/containers.go b/pkg/domain/infra/abi/containers.go index 48a32817d..d0599a595 100644 --- a/pkg/domain/infra/abi/containers.go +++ b/pkg/domain/infra/abi/containers.go @@ -6,7 +6,6 @@ import ( "io/ioutil" "os" "strconv" - "strings" "sync" "time" @@ -139,14 +138,6 @@ func (ic *ContainerEngine) ContainerUnpause(ctx context.Context, namesOrIds []st } func (ic *ContainerEngine) 
ContainerStop(ctx context.Context, namesOrIds []string, options entities.StopOptions) ([]*entities.StopReport, error) { names := namesOrIds - for _, cidFile := range options.CIDFiles { - content, err := ioutil.ReadFile(cidFile) - if err != nil { - return nil, errors.Wrap(err, "error reading CIDFile") - } - id := strings.Split(string(content), "\n")[0] - names = append(names, id) - } ctrs, err := getContainersByContext(options.All, options.Latest, names, ic.Libpod) if err != nil && !(options.Ignore && errors.Cause(err) == define.ErrNoSuchCtr) { return nil, err @@ -202,14 +193,6 @@ func (ic *ContainerEngine) ContainerPrune(ctx context.Context, options entities. } func (ic *ContainerEngine) ContainerKill(ctx context.Context, namesOrIds []string, options entities.KillOptions) ([]*entities.KillReport, error) { - for _, cidFile := range options.CIDFiles { - content, err := ioutil.ReadFile(cidFile) - if err != nil { - return nil, errors.Wrap(err, "error reading CIDFile") - } - id := strings.Split(string(content), "\n")[0] - namesOrIds = append(namesOrIds, id) - } sig, err := signal.ParseSignalNameOrNumber(options.Signal) if err != nil { return nil, err diff --git a/pkg/domain/infra/tunnel/containers.go b/pkg/domain/infra/tunnel/containers.go index 524b29553..e9c513f8e 100644 --- a/pkg/domain/infra/tunnel/containers.go +++ b/pkg/domain/infra/tunnel/containers.go @@ -4,7 +4,6 @@ import ( "context" "fmt" "io" - "io/ioutil" "os" "strconv" "strings" @@ -41,7 +40,7 @@ func (ic *ContainerEngine) ContainerWait(ctx context.Context, namesOrIds []strin return nil, err } responses := make([]entities.WaitReport, 0, len(cons)) - options := new(containers.WaitOptions).WithCondition(opts.Condition) + options := new(containers.WaitOptions).WithCondition(opts.Condition).WithInterval(opts.Interval.String()) for _, c := range cons { response := entities.WaitReport{Id: c.ID} exitCode, err := containers.Wait(ic.ClientCtx, c.ID, options) @@ -83,19 +82,11 @@ func (ic *ContainerEngine) ContainerUnpause(ctx context.Context, namesOrIds []st func (ic *ContainerEngine) ContainerStop(ctx context.Context, namesOrIds []string, opts entities.StopOptions) ([]*entities.StopReport, error) { reports := []*entities.StopReport{} - for _, cidFile := range opts.CIDFiles { - content, err := ioutil.ReadFile(cidFile) - if err != nil { - return nil, errors.Wrap(err, "error reading CIDFile") - } - id := strings.Split(string(content), "\n")[0] - namesOrIds = append(namesOrIds, id) - } ctrs, err := getContainersByContext(ic.ClientCtx, opts.All, opts.Ignore, namesOrIds) if err != nil { return nil, err } - options := new(containers.StopOptions) + options := new(containers.StopOptions).WithIgnore(opts.Ignore) if to := opts.Timeout; to != nil { options.WithTimeout(*to) } @@ -126,23 +117,16 @@ func (ic *ContainerEngine) ContainerStop(ctx context.Context, namesOrIds []strin } func (ic *ContainerEngine) ContainerKill(ctx context.Context, namesOrIds []string, opts entities.KillOptions) ([]*entities.KillReport, error) { - for _, cidFile := range opts.CIDFiles { - content, err := ioutil.ReadFile(cidFile) - if err != nil { - return nil, errors.Wrap(err, "error reading CIDFile") - } - id := strings.Split(string(content), "\n")[0] - namesOrIds = append(namesOrIds, id) - } ctrs, err := getContainersByContext(ic.ClientCtx, opts.All, false, namesOrIds) if err != nil { return nil, err } + options := new(containers.KillOptions).WithSignal(opts.Signal) reports := make([]*entities.KillReport, 0, len(ctrs)) for _, c := range ctrs { reports = append(reports, 
&entities.KillReport{ Id: c.ID, - Err: containers.Kill(ic.ClientCtx, c.ID, opts.Signal, nil), + Err: containers.Kill(ic.ClientCtx, c.ID, options), }) } return reports, nil @@ -173,19 +157,32 @@ func (ic *ContainerEngine) ContainerRestart(ctx context.Context, namesOrIds []st } func (ic *ContainerEngine) ContainerRm(ctx context.Context, namesOrIds []string, opts entities.RmOptions) ([]*entities.RmReport, error) { - ctrs, err := getContainersByContext(ic.ClientCtx, opts.All, opts.Ignore, namesOrIds) - if err != nil { - return nil, err - } // TODO there is no endpoint for container eviction. Need to discuss - reports := make([]*entities.RmReport, 0, len(ctrs)) - options := new(containers.RemoveOptions).WithForce(opts.Force).WithVolumes(opts.Volumes) - for _, c := range ctrs { + options := new(containers.RemoveOptions).WithForce(opts.Force).WithVolumes(opts.Volumes).WithIgnore(opts.Ignore) + + if opts.All { + ctrs, err := getContainersByContext(ic.ClientCtx, opts.All, opts.Ignore, namesOrIds) + if err != nil { + return nil, err + } + reports := make([]*entities.RmReport, 0, len(ctrs)) + for _, c := range ctrs { + reports = append(reports, &entities.RmReport{ + Id: c.ID, + Err: containers.Remove(ic.ClientCtx, c.ID, options), + }) + } + return reports, nil + } + + reports := make([]*entities.RmReport, 0, len(namesOrIds)) + for _, name := range namesOrIds { reports = append(reports, &entities.RmReport{ - Id: c.ID, - Err: containers.Remove(ic.ClientCtx, c.ID, options), + Id: name, + Err: containers.Remove(ic.ClientCtx, name, options), }) } + return reports, nil } @@ -601,7 +598,7 @@ func (ic *ContainerEngine) ContainerStart(ctx context.Context, namesOrIds []stri func (ic *ContainerEngine) ContainerList(ctx context.Context, opts entities.ContainerListOptions) ([]entities.ListContainer, error) { options := new(containers.ListOptions).WithFilters(opts.Filters).WithAll(opts.All).WithLast(opts.Last) - options.WithNamespace(opts.Namespace).WithSize(opts.Size).WithSync(opts.Sync) + options.WithNamespace(opts.Namespace).WithSize(opts.Size).WithSync(opts.Sync).WithExternal(opts.External) return containers.List(ic.ClientCtx, options) } diff --git a/pkg/domain/infra/tunnel/images.go b/pkg/domain/infra/tunnel/images.go index 0de756756..f10c8c175 100644 --- a/pkg/domain/infra/tunnel/images.go +++ b/pkg/domain/infra/tunnel/images.go @@ -106,8 +106,9 @@ func (ir *ImageEngine) Prune(ctx context.Context, opts entities.ImagePruneOption func (ir *ImageEngine) Pull(ctx context.Context, rawImage string, opts entities.ImagePullOptions) (*entities.ImagePullReport, error) { options := new(images.PullOptions) - options.WithAllTags(opts.AllTags).WithAuthfile(opts.Authfile).WithCertDir(opts.CertDir).WithArch(opts.Arch).WithOS(opts.OS) - options.WithVariant(opts.Variant).WithPassword(opts.Password).WithPullPolicy(opts.PullPolicy) + options.WithAllTags(opts.AllTags).WithAuthfile(opts.Authfile).WithArch(opts.Arch).WithOS(opts.OS) + options.WithVariant(opts.Variant).WithPassword(opts.Password) + options.WithQuiet(opts.Quiet).WithUsername(opts.Username) if s := opts.SkipTLSVerify; s != types.OptionalBoolUndefined { if s == types.OptionalBoolTrue { options.WithSkipTLSVerify(true) @@ -115,7 +116,6 @@ func (ir *ImageEngine) Pull(ctx context.Context, rawImage string, opts entities. 
options.WithSkipTLSVerify(false) } } - options.WithQuiet(opts.Quiet).WithSignaturePolicy(opts.SignaturePolicy).WithUsername(opts.Username) pulledImages, err := images.Pull(ir.ClientCtx, rawImage, options) if err != nil { return nil, err @@ -236,10 +236,7 @@ func (ir *ImageEngine) Import(ctx context.Context, opts entities.ImageImportOpti func (ir *ImageEngine) Push(ctx context.Context, source string, destination string, opts entities.ImagePushOptions) error { options := new(images.PushOptions) - options.WithUsername(opts.Username).WithSignaturePolicy(opts.SignaturePolicy).WithQuiet(opts.Quiet) - options.WithPassword(opts.Password).WithCertDir(opts.CertDir).WithAuthfile(opts.Authfile) - options.WithCompress(opts.Compress).WithDigestFile(opts.DigestFile).WithFormat(opts.Format) - options.WithRemoveSignatures(opts.RemoveSignatures).WithSignBy(opts.SignBy) + options.WithAll(opts.All).WithCompress(opts.Compress).WithUsername(opts.Username).WithPassword(opts.Password).WithAuthfile(opts.Authfile).WithFormat(opts.Format) if s := opts.SkipTLSVerify; s != types.OptionalBoolUndefined { if s == types.OptionalBoolTrue { diff --git a/pkg/domain/infra/tunnel/manifest.go b/pkg/domain/infra/tunnel/manifest.go index c12ba0045..e261afee2 100644 --- a/pkg/domain/infra/tunnel/manifest.go +++ b/pkg/domain/infra/tunnel/manifest.go @@ -86,10 +86,8 @@ func (ir *ImageEngine) ManifestRemove(ctx context.Context, names []string) (stri // ManifestPush pushes a manifest list or image index to the destination func (ir *ImageEngine) ManifestPush(ctx context.Context, name, destination string, opts entities.ImagePushOptions) (string, error) { options := new(images.PushOptions) - options.WithUsername(opts.Username).WithSignaturePolicy(opts.SignaturePolicy).WithQuiet(opts.Quiet) - options.WithPassword(opts.Password).WithCertDir(opts.CertDir).WithAuthfile(opts.Authfile) - options.WithCompress(opts.Compress).WithDigestFile(opts.DigestFile).WithFormat(opts.Format) - options.WithRemoveSignatures(opts.RemoveSignatures).WithSignBy(opts.SignBy) + options.WithUsername(opts.Username).WithPassword(opts.Password).WithAuthfile(opts.Authfile) + options.WithAll(opts.All) if s := opts.SkipTLSVerify; s != types.OptionalBoolUndefined { if s == types.OptionalBoolTrue { diff --git a/pkg/ps/ps.go b/pkg/ps/ps.go index dc577890a..42f9e1d39 100644 --- a/pkg/ps/ps.go +++ b/pkg/ps/ps.go @@ -69,7 +69,7 @@ func GetContainerLists(runtime *libpod.Runtime, options entities.ContainerListOp pss = append(pss, listCon) } - if options.All && options.Storage { + if options.All && options.External { externCons, err := runtime.StorageContainers() if err != nil { return nil, err diff --git a/pkg/specgen/container_validate.go b/pkg/specgen/container_validate.go index a0d36f865..81cb8b78d 100644 --- a/pkg/specgen/container_validate.go +++ b/pkg/specgen/container_validate.go @@ -30,7 +30,7 @@ func exclusiveOptions(opt1, opt2 string) error { // input for creating a container. 
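A minimal sketch of how the tunnel engines translate the tri-state SkipTLSVerify into the generated bindings setter, mirroring the Pull/Push hunks above; applySkipTLS is a hypothetical helper name, not code from the patch.

```go
package tunnel

import (
	"github.com/containers/image/v5/types"
	"github.com/containers/podman/v2/pkg/bindings/images"
)

// applySkipTLS only calls WithSkipTLSVerify when the caller made an explicit
// choice; when the value is undefined, the registry/system default wins.
func applySkipTLS(options *images.PushOptions, s types.OptionalBool) *images.PushOptions {
	switch s {
	case types.OptionalBoolTrue:
		return options.WithSkipTLSVerify(true)
	case types.OptionalBoolFalse:
		return options.WithSkipTLSVerify(false)
	}
	return options
}
```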
func (s *SpecGenerator) Validate() error { - if rootless.IsRootless() { + if rootless.IsRootless() && len(s.CNINetworks) == 0 { if s.StaticIP != nil || s.StaticIPv6 != nil { return ErrNoStaticIPRootless } diff --git a/pkg/specgen/generate/kube/kube.go b/pkg/specgen/generate/kube/kube.go index e39a700eb..0d7ee3ad2 100644 --- a/pkg/specgen/generate/kube/kube.go +++ b/pkg/specgen/generate/kube/kube.go @@ -3,6 +3,7 @@ package kube import ( "context" "fmt" + "net" "strings" "github.com/containers/common/pkg/parse" @@ -44,6 +45,31 @@ func ToPodGen(ctx context.Context, podName string, podYAML *v1.PodTemplateSpec) podPorts := getPodPorts(podYAML.Spec.Containers) p.PortMappings = podPorts + if dnsConfig := podYAML.Spec.DNSConfig; dnsConfig != nil { + // name servers + if dnsServers := dnsConfig.Nameservers; len(dnsServers) > 0 { + servers := make([]net.IP, 0) + for _, server := range dnsServers { + servers = append(servers, net.ParseIP(server)) + } + p.DNSServer = servers + } + // search domans + if domains := dnsConfig.Searches; len(domains) > 0 { + p.DNSSearch = domains + } + // dns options + if options := dnsConfig.Options; len(options) > 0 { + dnsOptions := make([]string, 0) + for _, opts := range options { + d := opts.Name + if opts.Value != nil { + d += ":" + *opts.Value + } + dnsOptions = append(dnsOptions, d) + } + } + } return p, nil } diff --git a/pkg/specgen/pod_validate.go b/pkg/specgen/pod_validate.go index 7c81f3f9f..518adb32f 100644 --- a/pkg/specgen/pod_validate.go +++ b/pkg/specgen/pod_validate.go @@ -20,7 +20,7 @@ func exclusivePodOptions(opt1, opt2 string) error { // Validate verifies the input is valid func (p *PodSpecGenerator) Validate() error { - if rootless.IsRootless() { + if rootless.IsRootless() && len(p.CNINetworks) == 0 { if p.StaticIP != nil { return ErrNoStaticIPRootless } diff --git a/pkg/systemd/generate/common.go b/pkg/systemd/generate/common.go index de6751a17..e9902319c 100644 --- a/pkg/systemd/generate/common.go +++ b/pkg/systemd/generate/common.go @@ -60,13 +60,21 @@ func filterPodFlags(command []string) []string { return processed } -// quoteArguments makes sure that all arguments with at least one whitespace +// escapeSystemdArguments makes sure that all arguments with at least one whitespace // are quoted to make sure those are interpreted as one argument instead of -// multiple ones. -func quoteArguments(command []string) []string { +// multiple ones. 
Also make sure to escape all characters which have a special +// meaning to systemd -> $,% and \ +// see: https://www.freedesktop.org/software/systemd/man/systemd.service.html#Command%20lines +func escapeSystemdArguments(command []string) []string { for i := range command { + command[i] = strings.ReplaceAll(command[i], "$", "$$") + command[i] = strings.ReplaceAll(command[i], "%", "%%") if strings.ContainsAny(command[i], " \t") { command[i] = strconv.Quote(command[i]) + } else if strings.Contains(command[i], `\`) { + // strconv.Quote also escapes backslashes so + // we should replace only if strconv.Quote was not used + command[i] = strings.ReplaceAll(command[i], `\`, `\\`) } } return command diff --git a/pkg/systemd/generate/common_test.go b/pkg/systemd/generate/common_test.go index d0ec5637c..a0691d1ad 100644 --- a/pkg/systemd/generate/common_test.go +++ b/pkg/systemd/generate/common_test.go @@ -29,7 +29,7 @@ func TestFilterPodFlags(t *testing.T) { } } -func TestQuoteArguments(t *testing.T) { +func TestEscapeSystemdArguments(t *testing.T) { tests := []struct { input []string output []string @@ -46,10 +46,46 @@ func TestQuoteArguments(t *testing.T) { []string{"foo", "bar=\"arg with\ttab\""}, []string{"foo", "\"bar=\\\"arg with\\ttab\\\"\""}, }, + { + []string{"$"}, + []string{"$$"}, + }, + { + []string{"foo", "command with dollar sign $"}, + []string{"foo", "\"command with dollar sign $$\""}, + }, + { + []string{"foo", "command with two dollar signs $$"}, + []string{"foo", "\"command with two dollar signs $$$$\""}, + }, + { + []string{"%"}, + []string{"%%"}, + }, + { + []string{"foo", "command with percent sign %"}, + []string{"foo", "\"command with percent sign %%\""}, + }, + { + []string{"foo", "command with two percent signs %%"}, + []string{"foo", "\"command with two percent signs %%%%\""}, + }, + { + []string{`\`}, + []string{`\\`}, + }, + { + []string{"foo", `command with backslash \`}, + []string{"foo", `"command with backslash \\"`}, + }, + { + []string{"foo", `command with two backslashs \\`}, + []string{"foo", `"command with two backslashs \\\\"`}, + }, } for _, test := range tests { - quoted := quoteArguments(test.input) + quoted := escapeSystemdArguments(test.input) assert.Equal(t, test.output, quoted) } } diff --git a/pkg/systemd/generate/containers.go b/pkg/systemd/generate/containers.go index 5f52b0a77..abe159812 100644 --- a/pkg/systemd/generate/containers.go +++ b/pkg/systemd/generate/containers.go @@ -204,7 +204,7 @@ func executeContainerTemplate(info *containerInfo, options entities.GenerateSyst startCommand := []string{info.Executable} if index > 2 { // include root flags - info.RootFlags = strings.Join(quoteArguments(info.CreateCommand[1:index-1]), " ") + info.RootFlags = strings.Join(escapeSystemdArguments(info.CreateCommand[1:index-1]), " ") startCommand = append(startCommand, info.CreateCommand[1:index-1]...) } startCommand = append(startCommand, @@ -279,7 +279,7 @@ func executeContainerTemplate(info *containerInfo, options entities.GenerateSyst } } startCommand = append(startCommand, remainingCmd...) 
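A standalone sketch of the escaping rules introduced by escapeSystemdArguments: '$' and '%' are doubled for systemd, whitespace forces quoting (which also escapes backslashes and tabs), and bare backslashes are doubled otherwise. This mirrors the hunk above but is not the package's own code; the sample argument matches the unit test added below.

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

func escapeForSystemd(command []string) []string {
	out := make([]string, len(command))
	for i, arg := range command {
		arg = strings.ReplaceAll(arg, "$", "$$")
		arg = strings.ReplaceAll(arg, "%", "%%")
		if strings.ContainsAny(arg, " \t") {
			// strconv.Quote also escapes backslashes and tabs.
			arg = strconv.Quote(arg)
		} else if strings.Contains(arg, `\`) {
			arg = strings.ReplaceAll(arg, `\`, `\\`)
		}
		out[i] = arg
	}
	return out
}

func main() {
	fmt.Println(escapeForSystemd([]string{"sh", "-c", `kill $$ && echo %\`}))
	// Prints: [sh -c "kill $$$$ && echo %%\\"]
}
```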
- startCommand = quoteArguments(startCommand) + startCommand = escapeSystemdArguments(startCommand) info.ExecStartPre = "/bin/rm -f {{{{.PIDFile}}}} {{{{.ContainerIDFile}}}}" info.ExecStart = strings.Join(startCommand, " ") diff --git a/pkg/systemd/generate/containers_test.go b/pkg/systemd/generate/containers_test.go index 96d95644b..be14e4c28 100644 --- a/pkg/systemd/generate/containers_test.go +++ b/pkg/systemd/generate/containers_test.go @@ -352,6 +352,30 @@ Type=forking [Install] WantedBy=multi-user.target default.target ` + + goodNewWithSpecialChars := `# jadda-jadda.service +# autogenerated by Podman CI + +[Unit] +Description=Podman jadda-jadda.service +Documentation=man:podman-generate-systemd(1) +Wants=network.target +After=network-online.target + +[Service] +Environment=PODMAN_SYSTEMD_UNIT=%n +Restart=always +TimeoutStopSec=70 +ExecStartPre=/bin/rm -f %t/jadda-jadda.pid %t/jadda-jadda.ctr-id +ExecStart=/usr/bin/podman run --conmon-pidfile %t/jadda-jadda.pid --cidfile %t/jadda-jadda.ctr-id --cgroups=no-conmon -d --replace --name test awesome-image:latest sh -c "kill $$$$ && echo %%\\" +ExecStop=/usr/bin/podman stop --ignore --cidfile %t/jadda-jadda.ctr-id -t 10 +ExecStopPost=/usr/bin/podman rm --ignore -f --cidfile %t/jadda-jadda.ctr-id +PIDFile=%t/jadda-jadda.pid +Type=forking + +[Install] +WantedBy=multi-user.target default.target +` tests := []struct { name string info containerInfo @@ -647,6 +671,22 @@ WantedBy=multi-user.target default.target true, false, }, + {"good with special chars", + containerInfo{ + Executable: "/usr/bin/podman", + ServiceName: "jadda-jadda", + ContainerNameOrID: "jadda-jadda", + RestartPolicy: "always", + PIDFile: "/var/run/containers/storage/overlay-containers/639c53578af4d84b8800b4635fa4e680ee80fd67e0e6a2d4eea48d1e3230f401/userdata/conmon.pid", + StopTimeout: 10, + PodmanVersion: "CI", + CreateCommand: []string{"I'll get stripped", "create", "--name", "test", "awesome-image:latest", "sh", "-c", "kill $$ && echo %\\"}, + EnvVariable: EnvVariable, + }, + goodNewWithSpecialChars, + true, + false, + }, } for _, tt := range tests { test := tt diff --git a/pkg/systemd/generate/pods.go b/pkg/systemd/generate/pods.go index c7e3aa955..d6ede19af 100644 --- a/pkg/systemd/generate/pods.go +++ b/pkg/systemd/generate/pods.go @@ -269,7 +269,7 @@ func executePodTemplate(info *podInfo, options entities.GenerateSystemdOptions) return "", errors.Errorf("pod does not appear to be created via `podman pod create`: %v", info.CreateCommand) } podRootArgs = info.CreateCommand[1 : podCreateIndex-1] - info.RootFlags = strings.Join(quoteArguments(podRootArgs), " ") + info.RootFlags = strings.Join(escapeSystemdArguments(podRootArgs), " ") podCreateArgs = filterPodFlags(info.CreateCommand[podCreateIndex+1:]) } // We're hard-coding the first five arguments and append the @@ -306,7 +306,7 @@ func executePodTemplate(info *podInfo, options entities.GenerateSystemdOptions) } startCommand = append(startCommand, podCreateArgs...) 
- startCommand = quoteArguments(startCommand) + startCommand = escapeSystemdArguments(startCommand) info.ExecStartPre1 = "/bin/rm -f {{{{.PIDFile}}}} {{{{.PodIDFile}}}}" info.ExecStartPre2 = strings.Join(startCommand, " ") diff --git a/pkg/util/mountOpts.go b/pkg/util/mountOpts.go index 580aaf4f2..b3a38f286 100644 --- a/pkg/util/mountOpts.go +++ b/pkg/util/mountOpts.go @@ -86,6 +86,10 @@ func ProcessOptions(options []string, isTmpfs bool, sourcePath string) ([]string return nil, errors.Wrapf(ErrDupeMntOption, "the 'tmpcopyup' or 'notmpcopyup' option can only be set once") } foundCopyUp = true + case "consistency": + // Often used on MACs and mistakenly on Linux platforms. + // Since Docker ignores this option so shall we. + continue case "notmpcopyup": if !isTmpfs { return nil, errors.Wrapf(ErrBadMntOption, "the 'notmpcopyup' option is only allowed with tmpfs mounts") diff --git a/test/apiv2/20-containers.at b/test/apiv2/20-containers.at index decdc4754..0da196e46 100644 --- a/test/apiv2/20-containers.at +++ b/test/apiv2/20-containers.at @@ -237,3 +237,12 @@ t GET containers/$cid/json 200 \ t DELETE containers/$cid 204 t DELETE images/${MultiTagName}?force=true 200 # vim: filetype=sh + +# Test Volumes field adds an anonymous volume +t POST containers/create '"Image":"'$IMAGE'","Volumes":{"/test":{}}' 201 \ + .Id~[0-9a-f]\\{64\\} +cid=$(jq -r '.Id' <<<"$output") +t GET containers/$cid/json 200 \ + .Mounts[0].Destination="/test" + +t DELETE containers/$cid?v=true 204 diff --git a/test/apiv2/rest_api/test_rest_v2_0_0.py b/test/apiv2/rest_api/test_rest_v2_0_0.py index 77674e81b..c4faa1548 100644 --- a/test/apiv2/rest_api/test_rest_v2_0_0.py +++ b/test/apiv2/rest_api/test_rest_v2_0_0.py @@ -1,7 +1,6 @@ import json import os import random -import shutil import string import subprocess import sys @@ -357,6 +356,7 @@ class TestApi(unittest.TestCase): def test_search_compat(self): url = PODMAN_URL + "/v1.40/images/search" + # Had issues with this test hanging when repositories not happy def do_search1(): payload = {'term': 'alpine'} @@ -619,6 +619,53 @@ class TestApi(unittest.TestCase): # self.assertIn(img["Id"], prune_payload["ImagesDeleted"][1]["Deleted"]) self.assertIsNotNone(prune_payload["ImagesDeleted"][1]["Deleted"]) + def test_status_compat(self): + r = requests.post(PODMAN_URL + "/v1.40/containers/create?name=topcontainer", + json={"Cmd": ["top"], "Image": "alpine:latest"}) + self.assertEqual(r.status_code, 201, r.text) + payload = json.loads(r.text) + container_id = payload["Id"] + self.assertIsNotNone(container_id) + + r = requests.get(PODMAN_URL + "/v1.40/containers/json", + params={'all': 'true', 'filters': f'{{"id":["{container_id}"]}}'}) + self.assertEqual(r.status_code, 200, r.text) + payload = json.loads(r.text) + self.assertEqual(payload[0]["Status"], "Created") + + r = requests.post(PODMAN_URL + f"/v1.40/containers/{container_id}/start") + self.assertEqual(r.status_code, 204, r.text) + + r = requests.get(PODMAN_URL + "/v1.40/containers/json", + params={'all': 'true', 'filters': f'{{"id":["{container_id}"]}}'}) + self.assertEqual(r.status_code, 200, r.text) + payload = json.loads(r.text) + self.assertTrue(str(payload[0]["Status"]).startswith("Up")) + + r = requests.post(PODMAN_URL + f"/v1.40/containers/{container_id}/pause") + self.assertEqual(r.status_code, 204, r.text) + + r = requests.get(PODMAN_URL + "/v1.40/containers/json", + params={'all': 'true', 'filters': f'{{"id":["{container_id}"]}}'}) + self.assertEqual(r.status_code, 200, r.text) + payload = json.loads(r.text) + 
self.assertTrue(str(payload[0]["Status"]).startswith("Up")) + self.assertTrue(str(payload[0]["Status"]).endswith("(Paused)")) + + r = requests.post(PODMAN_URL + f"/v1.40/containers/{container_id}/unpause") + self.assertEqual(r.status_code, 204, r.text) + r = requests.post(PODMAN_URL + f"/v1.40/containers/{container_id}/stop") + self.assertEqual(r.status_code, 204, r.text) + + r = requests.get(PODMAN_URL + "/v1.40/containers/json", + params={'all': 'true', 'filters': f'{{"id":["{container_id}"]}}'}) + self.assertEqual(r.status_code, 200, r.text) + payload = json.loads(r.text) + self.assertTrue(str(payload[0]["Status"]).startswith("Exited")) + + r = requests.delete(PODMAN_URL + f"/v1.40/containers/{container_id}") + self.assertEqual(r.status_code, 204, r.text) + if __name__ == "__main__": unittest.main() diff --git a/test/e2e/common_test.go b/test/e2e/common_test.go index 2668b1e7b..ffa6f1329 100644 --- a/test/e2e/common_test.go +++ b/test/e2e/common_test.go @@ -1,6 +1,7 @@ package integration import ( + "bytes" "fmt" "io/ioutil" "math/rand" @@ -10,6 +11,7 @@ import ( "sort" "strconv" "strings" + "sync" "testing" "time" @@ -84,6 +86,7 @@ type testResultsSortedLength struct{ testResultsSorted } func (a testResultsSorted) Less(i, j int) bool { return a[i].length < a[j].length } var testResults []testResult +var testResultsMutex sync.Mutex func TestMain(m *testing.M) { if reexec.Init() { @@ -349,7 +352,9 @@ func (p *PodmanTestIntegration) InspectContainer(name string) []define.InspectCo func processTestResult(f GinkgoTestDescription) { tr := testResult{length: f.Duration.Seconds(), name: f.TestText} + testResultsMutex.Lock() testResults = append(testResults, tr) + testResultsMutex.Unlock() } func GetPortLock(port string) storage.Locker { @@ -790,3 +795,12 @@ func (p *PodmanTestIntegration) removeCNINetwork(name string) { session.WaitWithDefaultTimeout() Expect(session.ExitCode()).To(BeNumerically("<=", 1)) } + +func (p *PodmanSessionIntegration) jq(jqCommand string) (string, error) { + var out bytes.Buffer + cmd := exec.Command("jq", jqCommand) + cmd.Stdin = strings.NewReader(p.OutputToString()) + cmd.Stdout = &out + err := cmd.Run() + return strings.TrimRight(out.String(), "\n"), err +} diff --git a/test/e2e/create_staticip_test.go b/test/e2e/create_staticip_test.go index 7a2267617..698bbf976 100644 --- a/test/e2e/create_staticip_test.go +++ b/test/e2e/create_staticip_test.go @@ -49,7 +49,7 @@ var _ = Describe("Podman create with --ip flag", func() { }) It("Podman create --ip with non-allocatable IP", func() { - SkipIfRootless("--ip is not supported in rootless mode") + SkipIfRootless("--ip not supported without network in rootless mode") result := podmanTest.Podman([]string{"create", "--name", "test", "--ip", "203.0.113.124", ALPINE, "ls"}) result.WaitWithDefaultTimeout() Expect(result.ExitCode()).To(Equal(0)) @@ -63,7 +63,7 @@ var _ = Describe("Podman create with --ip flag", func() { ip := GetRandomIPAddress() result := podmanTest.Podman([]string{"create", "--name", "test", "--ip", ip, ALPINE, "ip", "addr"}) result.WaitWithDefaultTimeout() - // Rootless static ip assignment should error + // Rootless static ip assignment without network should error if rootless.IsRootless() { Expect(result.ExitCode()).To(Equal(125)) } else { @@ -81,7 +81,7 @@ var _ = Describe("Podman create with --ip flag", func() { }) It("Podman create two containers with the same IP", func() { - SkipIfRootless("--ip not supported in rootless mode") + SkipIfRootless("--ip not supported without network in rootless mode") ip 
:= GetRandomIPAddress() result := podmanTest.Podman([]string{"create", "--name", "test1", "--ip", ip, ALPINE, "sleep", "999"}) result.WaitWithDefaultTimeout() diff --git a/test/e2e/create_staticmac_test.go b/test/e2e/create_staticmac_test.go index 1ac431da2..4c8f371a4 100644 --- a/test/e2e/create_staticmac_test.go +++ b/test/e2e/create_staticmac_test.go @@ -56,11 +56,7 @@ var _ = Describe("Podman run with --mac-address flag", func() { result := podmanTest.Podman([]string{"run", "--network", net, "--mac-address", "92:d0:c6:00:29:34", ALPINE, "ip", "addr"}) result.WaitWithDefaultTimeout() - if rootless.IsRootless() { - Expect(result.ExitCode()).To(Equal(125)) - } else { - Expect(result.ExitCode()).To(Equal(0)) - Expect(result.OutputToString()).To(ContainSubstring("92:d0:c6:00:29:34")) - } + Expect(result.ExitCode()).To(Equal(0)) + Expect(result.OutputToString()).To(ContainSubstring("92:d0:c6:00:29:34")) }) }) diff --git a/test/e2e/create_test.go b/test/e2e/create_test.go index 73d92e5a0..67c08ac09 100644 --- a/test/e2e/create_test.go +++ b/test/e2e/create_test.go @@ -553,7 +553,7 @@ var _ = Describe("Podman create", func() { }) It("create container in pod with IP should fail", func() { - SkipIfRootless("Setting IP not supported in rootless mode") + SkipIfRootless("Setting IP not supported in rootless mode without network") name := "createwithstaticip" pod := podmanTest.RunTopContainerInPod("", "new:"+name) pod.WaitWithDefaultTimeout() @@ -565,7 +565,7 @@ var _ = Describe("Podman create", func() { }) It("create container in pod with mac should fail", func() { - SkipIfRootless("Setting MAC Address not supported in rootless mode") + SkipIfRootless("Setting MAC Address not supported in rootless mode without network") name := "createwithstaticmac" pod := podmanTest.RunTopContainerInPod("", "new:"+name) pod.WaitWithDefaultTimeout() diff --git a/test/e2e/generate_kube_test.go b/test/e2e/generate_kube_test.go index 239817e6c..83b9cfb14 100644 --- a/test/e2e/generate_kube_test.go +++ b/test/e2e/generate_kube_test.go @@ -60,6 +60,7 @@ var _ = Describe("Podman generate kube", func() { pod := new(v1.Pod) err := yaml.Unmarshal(kube.Out.Contents(), pod) Expect(err).To(BeNil()) + Expect(pod.Spec.HostNetwork).To(Equal(false)) numContainers := 0 for range pod.Spec.Containers { @@ -144,6 +145,7 @@ var _ = Describe("Podman generate kube", func() { pod := new(v1.Pod) err := yaml.Unmarshal(kube.Out.Contents(), pod) Expect(err).To(BeNil()) + Expect(pod.Spec.HostNetwork).To(Equal(false)) numContainers := 0 for range pod.Spec.Containers { @@ -152,6 +154,40 @@ var _ = Describe("Podman generate kube", func() { Expect(numContainers).To(Equal(1)) }) + It("podman generate kube on pod with host network", func() { + podSession := podmanTest.Podman([]string{"pod", "create", "--name", "testHostNetwork", "--network", "host"}) + podSession.WaitWithDefaultTimeout() + Expect(podSession.ExitCode()).To(Equal(0)) + + session := podmanTest.Podman([]string{"create", "--name", "topcontainer", "--pod", "testHostNetwork", "--network", "host", ALPINE, "top"}) + session.WaitWithDefaultTimeout() + Expect(session.ExitCode()).To(Equal(0)) + + kube := podmanTest.Podman([]string{"generate", "kube", "testHostNetwork"}) + kube.WaitWithDefaultTimeout() + Expect(kube.ExitCode()).To(Equal(0)) + + pod := new(v1.Pod) + err := yaml.Unmarshal(kube.Out.Contents(), pod) + Expect(err).To(BeNil()) + Expect(pod.Spec.HostNetwork).To(Equal(true)) + }) + + It("podman generate kube on container with host network", func() { + session := 
podmanTest.RunTopContainerWithArgs("topcontainer", []string{"--network", "host"}) + session.WaitWithDefaultTimeout() + Expect(session.ExitCode()).To(Equal(0)) + + kube := podmanTest.Podman([]string{"generate", "kube", "topcontainer"}) + kube.WaitWithDefaultTimeout() + Expect(kube.ExitCode()).To(Equal(0)) + + pod := new(v1.Pod) + err := yaml.Unmarshal(kube.Out.Contents(), pod) + Expect(err).To(BeNil()) + Expect(pod.Spec.HostNetwork).To(Equal(true)) + }) + It("podman generate kube on pod with hostAliases", func() { podName := "testHost" testIP := "127.0.0.1" @@ -540,4 +576,67 @@ var _ = Describe("Podman generate kube", func() { kube.WaitWithDefaultTimeout() Expect(kube.ExitCode()).ToNot(Equal(0)) }) + + It("podman generate kube on a container with dns options", func() { + top := podmanTest.Podman([]string{"run", "-dt", "--name", "top", "--dns", "8.8.8.8", "--dns-search", "foobar.com", "--dns-opt", "color:blue", ALPINE, "top"}) + top.WaitWithDefaultTimeout() + Expect(top.ExitCode()).To(BeZero()) + + kube := podmanTest.Podman([]string{"generate", "kube", "top"}) + kube.WaitWithDefaultTimeout() + Expect(kube.ExitCode()).To(Equal(0)) + + pod := new(v1.Pod) + err := yaml.Unmarshal(kube.Out.Contents(), pod) + Expect(err).To(BeNil()) + + Expect(StringInSlice("8.8.8.8", pod.Spec.DNSConfig.Nameservers)).To(BeTrue()) + Expect(StringInSlice("foobar.com", pod.Spec.DNSConfig.Searches)).To(BeTrue()) + Expect(len(pod.Spec.DNSConfig.Options)).To(BeNumerically(">", 0)) + Expect(pod.Spec.DNSConfig.Options[0].Name).To(Equal("color")) + Expect(*pod.Spec.DNSConfig.Options[0].Value).To(Equal("blue")) + }) + + It("podman generate kube multiple container dns servers and options are cumulative", func() { + top1 := podmanTest.Podman([]string{"run", "-dt", "--name", "top1", "--dns", "8.8.8.8", "--dns-search", "foobar.com", ALPINE, "top"}) + top1.WaitWithDefaultTimeout() + Expect(top1.ExitCode()).To(BeZero()) + + top2 := podmanTest.Podman([]string{"run", "-dt", "--name", "top2", "--dns", "8.7.7.7", "--dns-search", "homer.com", ALPINE, "top"}) + top2.WaitWithDefaultTimeout() + Expect(top2.ExitCode()).To(BeZero()) + + kube := podmanTest.Podman([]string{"generate", "kube", "top1", "top2"}) + kube.WaitWithDefaultTimeout() + Expect(kube.ExitCode()).To(Equal(0)) + + pod := new(v1.Pod) + err := yaml.Unmarshal(kube.Out.Contents(), pod) + Expect(err).To(BeNil()) + + Expect(StringInSlice("8.8.8.8", pod.Spec.DNSConfig.Nameservers)).To(BeTrue()) + Expect(StringInSlice("8.7.7.7", pod.Spec.DNSConfig.Nameservers)).To(BeTrue()) + Expect(StringInSlice("foobar.com", pod.Spec.DNSConfig.Searches)).To(BeTrue()) + Expect(StringInSlice("homer.com", pod.Spec.DNSConfig.Searches)).To(BeTrue()) + }) + + It("podman generate kube on a pod with dns options", func() { + top := podmanTest.Podman([]string{"run", "--pod", "new:pod1", "-dt", "--name", "top", "--dns", "8.8.8.8", "--dns-search", "foobar.com", "--dns-opt", "color:blue", ALPINE, "top"}) + top.WaitWithDefaultTimeout() + Expect(top.ExitCode()).To(BeZero()) + + kube := podmanTest.Podman([]string{"generate", "kube", "pod1"}) + kube.WaitWithDefaultTimeout() + Expect(kube.ExitCode()).To(Equal(0)) + + pod := new(v1.Pod) + err := yaml.Unmarshal(kube.Out.Contents(), pod) + Expect(err).To(BeNil()) + + Expect(StringInSlice("8.8.8.8", pod.Spec.DNSConfig.Nameservers)).To(BeTrue()) + Expect(StringInSlice("foobar.com", pod.Spec.DNSConfig.Searches)).To(BeTrue()) + Expect(len(pod.Spec.DNSConfig.Options)).To(BeNumerically(">", 0)) + Expect(pod.Spec.DNSConfig.Options[0].Name).To(Equal("color")) + 
Expect(*pod.Spec.DNSConfig.Options[0].Value).To(Equal("blue")) + }) }) diff --git a/test/e2e/history_test.go b/test/e2e/history_test.go index fea3f4d43..1c57c60de 100644 --- a/test/e2e/history_test.go +++ b/test/e2e/history_test.go @@ -65,6 +65,23 @@ var _ = Describe("Podman history", func() { session.WaitWithDefaultTimeout() Expect(session.ExitCode()).To(Equal(0)) Expect(len(session.OutputToStringArray())).To(BeNumerically(">", 0)) + + session = podmanTest.Podman([]string{"history", "--no-trunc", "--format", "{{.ID}}", ALPINE}) + session.WaitWithDefaultTimeout() + Expect(session.ExitCode()).To(Equal(0)) + lines := session.OutputToStringArray() + Expect(len(lines)).To(BeNumerically(">", 0)) + // the image id must be 64 chars long + Expect(len(lines[0])).To(BeNumerically("==", 64)) + + session = podmanTest.Podman([]string{"history", "--no-trunc", "--format", "{{.CreatedBy}}", ALPINE}) + session.WaitWithDefaultTimeout() + Expect(session.ExitCode()).To(Equal(0)) + lines = session.OutputToStringArray() + Expect(len(lines)).To(BeNumerically(">", 0)) + Expect(session.OutputToString()).ToNot(ContainSubstring("...")) + // the second line in the alpine history contains a command longer than 45 chars + Expect(len(lines[1])).To(BeNumerically(">", 45)) }) It("podman history with json flag", func() { diff --git a/test/e2e/kill_test.go b/test/e2e/kill_test.go index 8b31cae72..c1c1b003e 100644 --- a/test/e2e/kill_test.go +++ b/test/e2e/kill_test.go @@ -167,4 +167,20 @@ var _ = Describe("Podman kill", func() { Expect(wait.ExitCode()).To(BeZero()) }) + It("podman stop --all", func() { + session := podmanTest.RunTopContainer("") + session.WaitWithDefaultTimeout() + Expect(session.ExitCode()).To(Equal(0)) + Expect(podmanTest.NumberOfContainersRunning()).To(Equal(1)) + + session = podmanTest.RunTopContainer("") + session.WaitWithDefaultTimeout() + Expect(session.ExitCode()).To(Equal(0)) + Expect(podmanTest.NumberOfContainersRunning()).To(Equal(2)) + + session = podmanTest.Podman([]string{"kill", "--all"}) + session.WaitWithDefaultTimeout() + Expect(session.ExitCode()).To(Equal(0)) + Expect(podmanTest.NumberOfContainersRunning()).To(Equal(0)) + }) }) diff --git a/test/e2e/network_test.go b/test/e2e/network_test.go index e2080244b..124ee7e29 100644 --- a/test/e2e/network_test.go +++ b/test/e2e/network_test.go @@ -408,7 +408,6 @@ var _ = Describe("Podman network", func() { Expect(lines[1]).To(Equal(netName2)) }) It("podman network with multiple aliases", func() { - Skip("Until DNSName is updated on our CI images") var worked bool netName := "aliasTest" + stringid.GenerateNonCryptoID() session := podmanTest.Podman([]string{"network", "create", netName}) @@ -458,6 +457,47 @@ var _ = Describe("Podman network", func() { Expect(nc.ExitCode()).To(Equal(0)) }) + It("podman network create/remove macvlan as driver (-d) no device name", func() { + net := "macvlan" + stringid.GenerateNonCryptoID() + nc := podmanTest.Podman([]string{"network", "create", "-d", "macvlan", net}) + nc.WaitWithDefaultTimeout() + defer podmanTest.removeCNINetwork(net) + Expect(nc.ExitCode()).To(Equal(0)) + + inspect := podmanTest.Podman([]string{"network", "inspect", net}) + inspect.WaitWithDefaultTimeout() + Expect(inspect.ExitCode()).To(BeZero()) + + out, err := inspect.jq(".[0].plugins[0].master") + Expect(err).To(BeNil()) + Expect(out).To(Equal("\"\"")) + + nc = podmanTest.Podman([]string{"network", "rm", net}) + nc.WaitWithDefaultTimeout() + Expect(nc.ExitCode()).To(Equal(0)) + }) + + It("podman network create/remove macvlan as driver (-d) 
with device name", func() { + net := "macvlan" + stringid.GenerateNonCryptoID() + nc := podmanTest.Podman([]string{"network", "create", "-d", "macvlan", "-o", "parent=lo", net}) + nc.WaitWithDefaultTimeout() + defer podmanTest.removeCNINetwork(net) + Expect(nc.ExitCode()).To(Equal(0)) + + inspect := podmanTest.Podman([]string{"network", "inspect", net}) + inspect.WaitWithDefaultTimeout() + Expect(inspect.ExitCode()).To(BeZero()) + fmt.Println(inspect.OutputToString()) + + out, err := inspect.jq(".[0].plugins[0].master") + Expect(err).To(BeNil()) + Expect(out).To(Equal("\"lo\"")) + + nc = podmanTest.Podman([]string{"network", "rm", net}) + nc.WaitWithDefaultTimeout() + Expect(nc.ExitCode()).To(Equal(0)) + }) + It("podman network exists", func() { net := "net" + stringid.GenerateNonCryptoID() session := podmanTest.Podman([]string{"network", "create", net}) diff --git a/test/e2e/pod_create_test.go b/test/e2e/pod_create_test.go index be0a2f6f0..575f9df68 100644 --- a/test/e2e/pod_create_test.go +++ b/test/e2e/pod_create_test.go @@ -233,7 +233,7 @@ var _ = Describe("Podman pod create", func() { ip := GetRandomIPAddress() podCreate := podmanTest.Podman([]string{"pod", "create", "--ip", ip, "--name", name}) podCreate.WaitWithDefaultTimeout() - // Rootless should error + // Rootless should error without network if rootless.IsRootless() { Expect(podCreate.ExitCode()).To(Equal(125)) } else { @@ -246,7 +246,7 @@ var _ = Describe("Podman pod create", func() { }) It("podman container in pod with IP address shares IP address", func() { - SkipIfRootless("Rootless does not support --ip") + SkipIfRootless("Rootless does not support --ip without network") podName := "test" ctrName := "testCtr" ip := GetRandomIPAddress() @@ -476,4 +476,21 @@ entrypoint ["/fromimage"] Expect(status3.ExitCode()).To(Equal(0)) Expect(strings.Contains(status3.OutputToString(), "Degraded")).To(BeTrue()) }) + + It("podman create with unsupported network options", func() { + podCreate := podmanTest.Podman([]string{"pod", "create", "--network", "none"}) + podCreate.WaitWithDefaultTimeout() + Expect(podCreate.ExitCode()).To(Equal(125)) + Expect(podCreate.ErrorToString()).To(ContainSubstring("pods presently do not support network mode none")) + + podCreate = podmanTest.Podman([]string{"pod", "create", "--network", "container:doesnotmatter"}) + podCreate.WaitWithDefaultTimeout() + Expect(podCreate.ExitCode()).To(Equal(125)) + Expect(podCreate.ErrorToString()).To(ContainSubstring("pods presently do not support network mode container")) + + podCreate = podmanTest.Podman([]string{"pod", "create", "--network", "ns:/does/not/matter"}) + podCreate.WaitWithDefaultTimeout() + Expect(podCreate.ExitCode()).To(Equal(125)) + Expect(podCreate.ErrorToString()).To(ContainSubstring("pods presently do not support network mode path")) + }) }) diff --git a/test/e2e/pod_inspect_test.go b/test/e2e/pod_inspect_test.go index 25212991d..fd9589afe 100644 --- a/test/e2e/pod_inspect_test.go +++ b/test/e2e/pod_inspect_test.go @@ -101,7 +101,7 @@ var _ = Describe("Podman pod inspect", func() { }) It("podman pod inspect outputs show correct MAC", func() { - SkipIfRootless("--mac-address is not supported in rootless mode") + SkipIfRootless("--mac-address is not supported in rootless mode without network") podName := "testPod" macAddr := "42:43:44:00:00:01" create := podmanTest.Podman([]string{"pod", "create", "--name", podName, "--mac-address", macAddr}) diff --git a/test/e2e/ps_test.go b/test/e2e/ps_test.go index 13701fc3b..d12534219 100644 --- a/test/e2e/ps_test.go 
+++ b/test/e2e/ps_test.go @@ -396,11 +396,14 @@ var _ = Describe("Podman ps", func() { session.WaitWithDefaultTimeout() Expect(session.ExitCode()).To(Equal(0)) - session = podmanTest.Podman([]string{"ps", "--pod", "--no-trunc"}) - + session = podmanTest.Podman([]string{"ps", "--no-trunc"}) session.WaitWithDefaultTimeout() Expect(session.ExitCode()).To(Equal(0)) + Expect(session.OutputToString()).To(Not(ContainSubstring(podid))) + session = podmanTest.Podman([]string{"ps", "--pod", "--no-trunc"}) + session.WaitWithDefaultTimeout() + Expect(session.ExitCode()).To(Equal(0)) Expect(session.OutputToString()).To(ContainSubstring(podid)) }) diff --git a/test/e2e/pull_test.go b/test/e2e/pull_test.go index 4b73004da..d47a3e47a 100644 --- a/test/e2e/pull_test.go +++ b/test/e2e/pull_test.go @@ -522,4 +522,31 @@ var _ = Describe("Podman pull", func() { Expect(data[0].Os).To(Equal(runtime.GOOS)) Expect(data[0].Architecture).To(Equal("arm64")) }) + + It("podman pull --arch", func() { + session := podmanTest.Podman([]string{"pull", "--arch=bogus", ALPINE}) + session.WaitWithDefaultTimeout() + Expect(session.ExitCode()).To(Equal(125)) + expectedError := "no image found in manifest list for architecture bogus" + Expect(session.ErrorToString()).To(ContainSubstring(expectedError)) + + session = podmanTest.Podman([]string{"pull", "--arch=arm64", "--os", "windows", ALPINE}) + session.WaitWithDefaultTimeout() + Expect(session.ExitCode()).To(Equal(125)) + expectedError = "no image found in manifest list for architecture" + Expect(session.ErrorToString()).To(ContainSubstring(expectedError)) + + session = podmanTest.Podman([]string{"pull", "-q", "--arch=arm64", ALPINE}) + session.WaitWithDefaultTimeout() + Expect(session.ExitCode()).To(Equal(0)) + + setup := podmanTest.Podman([]string{"image", "inspect", session.OutputToString()}) + setup.WaitWithDefaultTimeout() + Expect(setup.ExitCode()).To(Equal(0)) + + data := setup.InspectImageJSON() // returns []inspect.ImageData + Expect(len(data)).To(Equal(1)) + Expect(data[0].Os).To(Equal(runtime.GOOS)) + Expect(data[0].Architecture).To(Equal("arm64")) + }) }) diff --git a/test/e2e/push_test.go b/test/e2e/push_test.go index 922995060..00b5802a3 100644 --- a/test/e2e/push_test.go +++ b/test/e2e/push_test.go @@ -54,10 +54,16 @@ var _ = Describe("Podman push", func() { fmt.Sprintf("dir:%s", bbdir)}) session.WaitWithDefaultTimeout() Expect(session.ExitCode()).To(Equal(0)) + + bbdir = filepath.Join(podmanTest.TempDir, "busybox") + session = podmanTest.Podman([]string{"push", "--format", "oci", ALPINE, + fmt.Sprintf("dir:%s", bbdir)}) + session.WaitWithDefaultTimeout() + Expect(session.ExitCode()).To(Equal(0)) }) It("podman push to local registry", func() { - SkipIfRemote("FIXME: This should work") + SkipIfRemote("Remote does not support --digestfile or --remove-signatures") if podmanTest.Host.Arch == "ppc64le" { Skip("No registry image for ppc64le") } @@ -74,7 +80,7 @@ var _ = Describe("Podman push", func() { Skip("Cannot start docker registry.") } - push := podmanTest.Podman([]string{"push", "--tls-verify=false", "--remove-signatures", ALPINE, "localhost:5000/my-alpine"}) + push := podmanTest.Podman([]string{"push", "-q", "--tls-verify=false", "--remove-signatures", ALPINE, "localhost:5000/my-alpine"}) push.WaitWithDefaultTimeout() Expect(push.ExitCode()).To(Equal(0)) @@ -88,7 +94,6 @@ var _ = Describe("Podman push", func() { }) It("podman push to local registry with authorization", func() { - SkipIfRemote("FIXME: This does not seem to be returning an error") 
SkipIfRootless("FIXME: Creating content in certs.d we use directories in homedir") if podmanTest.Host.Arch == "ppc64le" { Skip("No registry image for ppc64le") @@ -140,7 +145,7 @@ var _ = Describe("Podman push", func() { session = podmanTest.Podman([]string{"logs", "registry"}) session.WaitWithDefaultTimeout() - push := podmanTest.Podman([]string{"push", "--creds=podmantest:test", ALPINE, "localhost:5000/tlstest"}) + push := podmanTest.Podman([]string{"push", "--format=v2s2", "--creds=podmantest:test", ALPINE, "localhost:5000/tlstest"}) push.WaitWithDefaultTimeout() Expect(push).To(ExitWithError()) @@ -155,9 +160,12 @@ var _ = Describe("Podman push", func() { push.WaitWithDefaultTimeout() Expect(push).To(ExitWithError()) - push = podmanTest.Podman([]string{"push", "--creds=podmantest:test", "--cert-dir=fakedir", ALPINE, "localhost:5000/certdirtest"}) - push.WaitWithDefaultTimeout() - Expect(push).To(ExitWithError()) + if !IsRemote() { + // remote does not support --cert-dir + push = podmanTest.Podman([]string{"push", "--creds=podmantest:test", "--cert-dir=fakedir", ALPINE, "localhost:5000/certdirtest"}) + push.WaitWithDefaultTimeout() + Expect(push).To(ExitWithError()) + } push = podmanTest.Podman([]string{"push", "--creds=podmantest:test", ALPINE, "localhost:5000/defaultflags"}) push.WaitWithDefaultTimeout() diff --git a/test/e2e/restart_test.go b/test/e2e/restart_test.go index 584ccd22b..bcaab8947 100644 --- a/test/e2e/restart_test.go +++ b/test/e2e/restart_test.go @@ -225,4 +225,26 @@ var _ = Describe("Podman restart", func() { // line count should be equal Expect(beforeRestart.OutputToString()).To(Equal(afterRestart.OutputToString())) }) + + It("podman restart --all", func() { + session := podmanTest.RunTopContainer("") + session.WaitWithDefaultTimeout() + Expect(session.ExitCode()).To(Equal(0)) + Expect(podmanTest.NumberOfContainersRunning()).To(Equal(1)) + + session = podmanTest.RunTopContainer("") + session.WaitWithDefaultTimeout() + Expect(session.ExitCode()).To(Equal(0)) + Expect(podmanTest.NumberOfContainersRunning()).To(Equal(2)) + + session = podmanTest.Podman([]string{"stop", "--all"}) + session.WaitWithDefaultTimeout() + Expect(session.ExitCode()).To(Equal(0)) + Expect(podmanTest.NumberOfContainersRunning()).To(Equal(0)) + + session = podmanTest.Podman([]string{"restart", "--all"}) + session.WaitWithDefaultTimeout() + Expect(session.ExitCode()).To(Equal(0)) + Expect(podmanTest.NumberOfContainersRunning()).To(Equal(2)) + }) }) diff --git a/test/e2e/rm_test.go b/test/e2e/rm_test.go index ca142d7f3..4c50a61ef 100644 --- a/test/e2e/rm_test.go +++ b/test/e2e/rm_test.go @@ -132,7 +132,7 @@ var _ = Describe("Podman rm", func() { latest := "-l" if IsRemote() { - latest = "test1" + latest = cid } result := podmanTest.Podman([]string{"rm", latest}) result.WaitWithDefaultTimeout() diff --git a/test/e2e/run_networking_test.go b/test/e2e/run_networking_test.go index cbaae7186..ebea2132a 100644 --- a/test/e2e/run_networking_test.go +++ b/test/e2e/run_networking_test.go @@ -621,7 +621,6 @@ var _ = Describe("Podman run networking", func() { }) It("podman run in custom CNI network with --static-ip", func() { - SkipIfRootless("Rootless mode does not support --ip") netName := stringid.GenerateNonCryptoID() ipAddr := "10.25.30.128" create := podmanTest.Podman([]string{"network", "create", "--subnet", "10.25.30.0/24", netName}) @@ -633,10 +632,6 @@ var _ = Describe("Podman run networking", func() { run.WaitWithDefaultTimeout() Expect(run.ExitCode()).To(BeZero()) 
Expect(run.OutputToString()).To(ContainSubstring(ipAddr)) - - create = podmanTest.Podman([]string{"network", "rm", netName}) - create.WaitWithDefaultTimeout() - Expect(create.ExitCode()).To(BeZero()) }) It("podman rootless fails custom CNI network with --uidmap", func() { @@ -658,7 +653,6 @@ var _ = Describe("Podman run networking", func() { }) It("podman run with new:pod and static-ip", func() { - SkipIfRootless("Rootless does not support --ip") netName := stringid.GenerateNonCryptoID() ipAddr := "10.25.40.128" podname := "testpod" diff --git a/test/e2e/run_staticip_test.go b/test/e2e/run_staticip_test.go index 8383b1812..aeb462ae9 100644 --- a/test/e2e/run_staticip_test.go +++ b/test/e2e/run_staticip_test.go @@ -19,7 +19,7 @@ var _ = Describe("Podman run with --ip flag", func() { ) BeforeEach(func() { - SkipIfRootless("rootless does not support --ip") + SkipIfRootless("rootless does not support --ip without network") tempdir, err = CreateTempDirInTempDir() if err != nil { os.Exit(1) diff --git a/test/e2e/run_volume_test.go b/test/e2e/run_volume_test.go index 7c74cea78..bc89b59de 100644 --- a/test/e2e/run_volume_test.go +++ b/test/e2e/run_volume_test.go @@ -110,7 +110,7 @@ var _ = Describe("Podman run with volumes", func() { Expect(session.ExitCode()).To(Equal(0)) Expect(session.OutputToString()).To(ContainSubstring(dest + " ro")) - session = podmanTest.Podman([]string{"run", "--rm", "--mount", mount + ",shared", ALPINE, "grep", dest, "/proc/self/mountinfo"}) + session = podmanTest.Podman([]string{"run", "--rm", "--mount", mount + ",consistency=delegated,shared", ALPINE, "grep", dest, "/proc/self/mountinfo"}) session.WaitWithDefaultTimeout() Expect(session.ExitCode()).To(Equal(0)) found, matches := session.GrepString(dest) diff --git a/test/e2e/stop_test.go b/test/e2e/stop_test.go index c25709a63..750d38ffb 100644 --- a/test/e2e/stop_test.go +++ b/test/e2e/stop_test.go @@ -164,13 +164,14 @@ var _ = Describe("Podman stop", func() { }) It("podman stop container --timeout", func() { - session := podmanTest.RunTopContainer("test5") + session := podmanTest.Podman([]string{"run", "-d", "--name", "test5", ALPINE, "sleep", "100"}) session.WaitWithDefaultTimeout() Expect(session.ExitCode()).To(Equal(0)) cid1 := session.OutputToString() - session = podmanTest.Podman([]string{"stop", "--timeout", "1", "test5"}) - session.WaitWithDefaultTimeout() + // Without timeout container stops in 10 seconds + // If not stopped in 5 seconds, then --timeout did not work + session.Wait(5) Expect(session.ExitCode()).To(Equal(0)) output := session.OutputToString() Expect(output).To(ContainSubstring(cid1)) @@ -307,4 +308,38 @@ var _ = Describe("Podman stop", func() { result.WaitWithDefaultTimeout() Expect(result.ExitCode()).To(Equal(125)) }) + + It("podman stop --all", func() { + session := podmanTest.RunTopContainer("") + session.WaitWithDefaultTimeout() + Expect(session.ExitCode()).To(Equal(0)) + Expect(podmanTest.NumberOfContainersRunning()).To(Equal(1)) + + session = podmanTest.RunTopContainer("") + session.WaitWithDefaultTimeout() + Expect(session.ExitCode()).To(Equal(0)) + Expect(podmanTest.NumberOfContainersRunning()).To(Equal(2)) + + session = podmanTest.Podman([]string{"stop", "--all"}) + session.WaitWithDefaultTimeout() + Expect(session.ExitCode()).To(Equal(0)) + Expect(podmanTest.NumberOfContainersRunning()).To(Equal(0)) + }) + + It("podman stop --ignore", func() { + session := podmanTest.RunTopContainer("") + session.WaitWithDefaultTimeout() + Expect(session.ExitCode()).To(Equal(0)) + cid := 
session.OutputToString() + Expect(podmanTest.NumberOfContainersRunning()).To(Equal(1)) + + session = podmanTest.Podman([]string{"stop", "bogus", cid}) + session.WaitWithDefaultTimeout() + Expect(session.ExitCode()).To(Equal(125)) + + session = podmanTest.Podman([]string{"stop", "--ignore", "bogus", cid}) + session.WaitWithDefaultTimeout() + Expect(session.ExitCode()).To(Equal(0)) + Expect(podmanTest.NumberOfContainersRunning()).To(Equal(0)) + }) }) diff --git a/test/system/040-ps.bats b/test/system/040-ps.bats index 0ae8b0ce0..ae27c479f 100644 --- a/test/system/040-ps.bats +++ b/test/system/040-ps.bats @@ -82,11 +82,10 @@ load helpers run_podman rm -a } -@test "podman ps -a --storage" { - skip_if_remote "ps --storage does not work over remote" +@test "podman ps -a --external" { # Setup: ensure that we have no hidden storage containers - run_podman ps --storage -a + run_podman ps --external -a is "${#lines[@]}" "1" "setup check: no storage containers at start of test" # Force a buildah timeout; this leaves a buildah container behind @@ -98,18 +97,18 @@ EOF run_podman ps -a is "${#lines[@]}" "1" "podman ps -a does not see buildah container" - run_podman ps --storage -a - is "${#lines[@]}" "2" "podman ps -a --storage sees buildah container" + run_podman ps --external -a + is "${#lines[@]}" "2" "podman ps -a --external sees buildah container" is "${lines[1]}" \ "[0-9a-f]\{12\} \+$IMAGE *buildah .* seconds ago .* storage .* ${PODMAN_TEST_IMAGE_NAME}-working-container" \ - "podman ps --storage" + "podman ps --external" cid="${lines[1]:0:12}" # 'rm -a' should be a NOP run_podman rm -a - run_podman ps --storage -a - is "${#lines[@]}" "2" "podman ps -a --storage sees buildah container" + run_podman ps --external -a + is "${#lines[@]}" "2" "podman ps -a --external sees buildah container" # We can't rm it without -f, but podman should issue a helpful message run_podman 2 rm "$cid" @@ -118,7 +117,7 @@ EOF # With -f, we can remove it. 
run_podman rm -f "$cid" - run_podman ps --storage -a + run_podman ps --external -a is "${#lines[@]}" "1" "storage container has been removed" } diff --git a/test/system/075-exec.bats b/test/system/075-exec.bats index c028e16c9..badf44c49 100644 --- a/test/system/075-exec.bats +++ b/test/system/075-exec.bats @@ -6,8 +6,6 @@ load helpers @test "podman exec - basic test" { - skip_if_remote "FIXME: pending #7241" - rand_filename=$(random_string 20) rand_content=$(random_string 50) diff --git a/test/system/150-login.bats b/test/system/150-login.bats index 5151ab0e1..c3af63348 100644 --- a/test/system/150-login.bats +++ b/test/system/150-login.bats @@ -197,6 +197,7 @@ EOF destname=ok-$(random_string 10 | tr A-Z a-z)-ok # Use command-line credentials run_podman push --tls-verify=false \ + --format docker \ --creds ${PODMAN_LOGIN_USER}:${PODMAN_LOGIN_PASS} \ $IMAGE localhost:${PODMAN_LOGIN_REGISTRY_PORT}/$destname diff --git a/vendor/github.com/containers/buildah/CHANGELOG.md b/vendor/github.com/containers/buildah/CHANGELOG.md index 25f02db19..0ad3069ce 100644 --- a/vendor/github.com/containers/buildah/CHANGELOG.md +++ b/vendor/github.com/containers/buildah/CHANGELOG.md @@ -2,6 +2,24 @@ # Changelog +## v1.19.3 (2021-01-28) + [ci:docs] Fix man page for buildah push + Vendor in containers/image v5.10.1 + Rebuild layer if a change in ARG is detected + Bump golang.org/x/crypto to latest rel-1.19 + local image lookup by digest + Use build-arg ENV val from local environment if set + Pick default OCI Runtime from containers.conf + +## v1.19.2 (2021-01-15) + If overlay mount point destination does not exists, do not throw error + Vendor in containers/common + +## v1.19.1 (2021-01-14) + Cherry pick localhost fix and update CI configuration for release-1.19 + use local image name for pull policy checks + Vendor in common 0.33.1 + ## v1.19.0 (2021-01-08) Update vendor of containers/storage and containers/common Buildah inspect should be able to inspect manifests diff --git a/vendor/github.com/containers/buildah/buildah.go b/vendor/github.com/containers/buildah/buildah.go index 89fc860dd..4fbc475c2 100644 --- a/vendor/github.com/containers/buildah/buildah.go +++ b/vendor/github.com/containers/buildah/buildah.go @@ -28,7 +28,7 @@ const ( Package = "buildah" // Version for the Package. Bump version in contrib/rpm/buildah.spec // too. - Version = "1.19.2" + Version = "1.19.3" // The value we use to identify what type of information, currently a // serialized Builder structure, we are using as per-container state. 
// This should only be changed when we make incompatible changes to diff --git a/vendor/github.com/containers/buildah/changelog.txt b/vendor/github.com/containers/buildah/changelog.txt index ce2f2696f..db2faf71a 100644 --- a/vendor/github.com/containers/buildah/changelog.txt +++ b/vendor/github.com/containers/buildah/changelog.txt @@ -1,3 +1,21 @@ +- Changelog for v1.19.3 (2021-01-28) + * [ci:docs] Fix man page for buildah push + * Vendor in containers/image v5.10.1 + * Rebuild layer if a change in ARG is detected + * Bump golang.org/x/crypto to latest rel-1.19 + * local image lookup by digest + * Use build-arg ENV val from local environment if set + * Pick default OCI Runtime from containers.conf + +- Changelog for v1.19.2 (2021-01-15) + * If overlay mount point destination does not exists, do not throw error + * Vendor in containers/common + +- Changelog for v1.19.1 (2021-01-14) + * Cherry pick localhost fix and update CI configuration for release-1.19 + * use local image name for pull policy checks + * Vendor in common 0.33.1 + - Changelog for v1.19.0 (2021-01-08) * Update vendor of containers/storage and containers/common * Buildah inspect should be able to inspect manifests diff --git a/vendor/github.com/containers/buildah/go.mod b/vendor/github.com/containers/buildah/go.mod index 135926116..cccf42895 100644 --- a/vendor/github.com/containers/buildah/go.mod +++ b/vendor/github.com/containers/buildah/go.mod @@ -6,7 +6,7 @@ require ( github.com/containerd/containerd v1.4.1 // indirect github.com/containernetworking/cni v0.7.2-0.20190904153231-83439463f784 github.com/containers/common v0.33.1 - github.com/containers/image/v5 v5.9.0 + github.com/containers/image/v5 v5.10.1 github.com/containers/ocicrypt v1.0.3 github.com/containers/storage v1.24.5 github.com/docker/distribution v2.7.1+incompatible @@ -33,12 +33,12 @@ require ( github.com/sirupsen/logrus v1.7.0 github.com/spf13/cobra v1.1.1 github.com/spf13/pflag v1.0.5 - github.com/stretchr/testify v1.6.1 + github.com/stretchr/testify v1.7.0 github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2 go.etcd.io/bbolt v1.3.5 - golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 + golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a - golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3 + golang.org/x/sys v0.0.0-20201218084310-7d0127a74742 gotest.tools/v3 v3.0.3 // indirect k8s.io/klog v1.0.0 // indirect ) diff --git a/vendor/github.com/containers/buildah/go.sum b/vendor/github.com/containers/buildah/go.sum index 6a5f70a36..bf796c496 100644 --- a/vendor/github.com/containers/buildah/go.sum +++ b/vendor/github.com/containers/buildah/go.sum @@ -82,6 +82,8 @@ github.com/containers/common v0.33.1 h1:XpDiq8Cta8+u1s4kpYSEWdB140ZmqgyIXfWkLqKx github.com/containers/common v0.33.1/go.mod h1:mjDo/NKeweL/onaspLhZ38WnHXaYmrELHclIdvSnYpY= github.com/containers/image/v5 v5.9.0 h1:dRmUtcluQcmasNo3DpnRoZjfU0rOu1qZeL6wlDJr10Q= github.com/containers/image/v5 v5.9.0/go.mod h1:blOEFd/iFdeyh891ByhCVUc+xAcaI3gBegXECwz9UbQ= +github.com/containers/image/v5 v5.10.1 h1:tHhGQ8RCMxJfJLD/PEW1qrOKX8nndledW9qz6UiAxns= +github.com/containers/image/v5 v5.10.1/go.mod h1:JlRLJZv7elVbtHaaaR6Kz8i6G3k2ttj4t7fubwxD9Hs= github.com/containers/libtrust v0.0.0-20190913040956-14b96171aa3b h1:Q8ePgVfHDplZ7U33NwHZkrVELsZP5fYj9pM5WBZB2GE= github.com/containers/libtrust v0.0.0-20190913040956-14b96171aa3b/go.mod h1:9rfv8iPl1ZP7aqh9YA68wnZv2NUDbXdcdPHVz0pFbPY= github.com/containers/ocicrypt v1.0.3 
h1:vYgl+RZ9Q3DPMuTfxmN+qp0X2Bj52uuY2vnt6GzVe1c= @@ -238,6 +240,8 @@ github.com/klauspost/compress v1.11.3 h1:dB4Bn0tN3wdCzQxnS8r06kV74qN/TAfaIS0bVE8 github.com/klauspost/compress v1.11.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= github.com/klauspost/compress v1.11.5 h1:xNCE0uE6yvTPRS+0wGNMHPo3NIpwnk6aluQZ6R6kRcc= github.com/klauspost/compress v1.11.5/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= +github.com/klauspost/compress v1.11.7 h1:0hzRabrMN4tSTvMfnL3SCv1ZGeAP23ynzodBgaHeMeg= +github.com/klauspost/compress v1.11.7/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= github.com/klauspost/pgzip v1.2.5 h1:qnWYvvKqedOF2ulHpMG72XQol4ILEJ8k2wwRl/Km8oE= github.com/klauspost/pgzip v1.2.5/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs= github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk= @@ -404,6 +408,8 @@ github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UV github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2 h1:b6uOv7YOFK0TYG7HtkIgExQo+2RdLuwRft63jn2HWj8= github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= @@ -412,6 +418,8 @@ github.com/tchap/go-patricia v2.3.0+incompatible/go.mod h1:bmLyhP68RS6kStMGxByiQ github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/ulikunitz/xz v0.5.8 h1:ERv8V6GKqVi23rgu5cj9pVfVzJbOqAY2Ntl88O6c2nQ= github.com/ulikunitz/xz v0.5.8/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= +github.com/ulikunitz/xz v0.5.9 h1:RsKRIA2MO8x56wkkcd3LbtcE/uMszhb6DpRf+3uwa3I= +github.com/ulikunitz/xz v0.5.9/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= github.com/urfave/cli v0.0.0-20171014202726-7bc6a0acffa5/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= @@ -419,6 +427,8 @@ github.com/vbatts/tar-split v0.11.1 h1:0Odu65rhcZ3JZaPHxl7tCI3V/C/Q9Zf82UFravl02 github.com/vbatts/tar-split v0.11.1/go.mod h1:LEuURwDEiWjRjwu46yU3KVGuUdVv/dcnpcEPSzR8z6g= github.com/vbauerster/mpb/v5 v5.3.0 h1:vgrEJjUzHaSZKDRRxul5Oh4C72Yy/5VEMb0em+9M0mQ= github.com/vbauerster/mpb/v5 v5.3.0/go.mod h1:4yTkvAb8Cm4eylAp6t0JRq6pXDkFJ4krUlDqWYkakAs= +github.com/vbauerster/mpb/v5 v5.4.0 h1:n8JPunifvQvh6P1D1HAl2Ur9YcmKT1tpoUuiea5mlmg= +github.com/vbauerster/mpb/v5 v5.4.0/go.mod h1:fi4wVo7BVQ22QcvFObm+VwliQXlV1eBT8JDaKXR4JGI= github.com/vishvananda/netlink v1.1.0 h1:1iyaYNBLmP6L0220aDnYQpo1QEV4t4hJ+xEEhhJH8j0= github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE= github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df h1:OviZH7qLw/7ZovXvuNyL3XQl8UFofeikI1NW1Gypu7k= @@ -456,6 +466,8 @@ golang.org/x/crypto v0.0.0-20200423211502-4bdfaf469ed5/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto 
v0.0.0-20200429183012-4b2356b1ed79/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 h1:psW17arqaxU48Z5kZ0CQnkZWQJsqcURM6tKiBApRjXI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad h1:DN0cp81fZ3njFcrLCytUHRSUkqBjfTo4Tx9RJTWs0EY= +golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -527,6 +539,7 @@ golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191022100944-742c48ecaeb7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191115151921-52ab43148777/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200120151820-655fe14d7479/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -543,6 +556,10 @@ golang.org/x/sys v0.0.0-20200909081042-eff7692f9009/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3 h1:kzM6+9dur93BcC2kVlYl34cHU+TYZLanmpSJHVMmL64= golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201218084310-7d0127a74742 h1:+CBz4km/0KPU3RGTwARGh/noP3bEwtHcq+0YcBQM2JQ= +golang.org/x/sys v0.0.0-20201218084310-7d0127a74742/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/term v0.0.0-20201117132131-f5c789dd3221 h1:/ZHdbVpdR/jk3g30/d4yUL0JU9kksj8+F/bnQUVLGDM= +golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= diff --git a/vendor/github.com/containers/buildah/imagebuildah/stage_executor.go b/vendor/github.com/containers/buildah/imagebuildah/stage_executor.go index 191645b89..9c15785bc 100644 --- a/vendor/github.com/containers/buildah/imagebuildah/stage_executor.go +++ b/vendor/github.com/containers/buildah/imagebuildah/stage_executor.go @@ -834,7 +834,11 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string, // Check if there's already an image based on our parent that // has the same change that we're about to make, so far as we // can tell. 
- if checkForLayers { + // Only do this if there were no build args given by the user, + // we need to call ib.Run() to correctly put the args together before + // determining if a cached layer with the same build args already exists + // and that is done in the if block below. + if checkForLayers && s.builder.Args == nil { cacheID, err = s.intermediateImageExists(ctx, node, addedContentSummary, s.stepRequiresLayer(step)) if err != nil { return "", nil, errors.Wrap(err, "error checking if cached image exists from a previous build") @@ -1022,6 +1026,9 @@ func (s *StageExecutor) getCreatedBy(node *parser.Node, addedContentSummary stri return "/bin/sh" } switch strings.ToUpper(node.Value) { + case "ARG": + buildArgs := s.getBuildArgs() + return "/bin/sh -c #(nop) ARG " + buildArgs case "RUN": buildArgs := s.getBuildArgs() if buildArgs != "" { diff --git a/vendor/github.com/containers/buildah/new.go b/vendor/github.com/containers/buildah/new.go index 4d70e0146..2ee86dd13 100644 --- a/vendor/github.com/containers/buildah/new.go +++ b/vendor/github.com/containers/buildah/new.go @@ -150,10 +150,10 @@ func resolveImage(ctx context.Context, systemContext *types.SystemContext, store return nil, "", nil, err } - // If we could resolve the image locally, check if it was referenced by - // ID. In that case, we don't need to bother any further and can - // prevent prompting the user. - if localImage != nil && strings.HasPrefix(localImage.ID, options.FromImage) { + // If we could resolve the image locally, check if it was clearly + // referring to a local image, either by ID or digest. In that case, + // we don't need to perform a remote lookup. + if localImage != nil && (strings.HasPrefix(localImage.ID, options.FromImage) || strings.HasPrefix(options.FromImage, "sha256:")) { return localImageRef, localImageRef.Transport().Name(), localImage, nil } diff --git a/vendor/github.com/containers/buildah/util/types.go b/vendor/github.com/containers/buildah/util/types.go index dc5f4b6c8..ca0f31532 100644 --- a/vendor/github.com/containers/buildah/util/types.go +++ b/vendor/github.com/containers/buildah/util/types.go @@ -1,7 +1,7 @@ package util const ( - // DefaultRuntime is the default command to use to run the container. + // Deprecated: Default runtime should come from containers.conf DefaultRuntime = "runc" // DefaultCNIPluginPath is the default location of CNI plugin helpers. DefaultCNIPluginPath = "/usr/libexec/cni:/opt/cni/bin" diff --git a/vendor/github.com/containers/buildah/util/util.go b/vendor/github.com/containers/buildah/util/util.go index 99f68d9e1..338c4503a 100644 --- a/vendor/github.com/containers/buildah/util/util.go +++ b/vendor/github.com/containers/buildah/util/util.go @@ -20,6 +20,7 @@ import ( "github.com/containers/image/v5/types" "github.com/containers/storage" "github.com/docker/distribution/registry/api/errcode" + "github.com/opencontainers/go-digest" specs "github.com/opencontainers/runtime-spec/specs-go" "github.com/pkg/errors" "github.com/sirupsen/logrus" @@ -68,6 +69,19 @@ func ResolveName(name string, firstRegistry string, sc *types.SystemContext, sto return []string{img.ID}, "", false, nil } } + // If we're referring to an image by digest, it *must* be local and we + // should not have any fall through/back logic. 
+ if strings.HasPrefix(name, "sha256:") { + d, err := digest.Parse(name) + if err != nil { + return nil, "", false, err + } + img, err := store.Image(d.Encoded()) + if err != nil { + return nil, "", false, err + } + return []string{img.ID}, "", false, nil + } // Transports are not supported for local image look ups. srcRef, err := alltransports.ParseImageName(name) @@ -263,7 +277,12 @@ func Runtime() string { return "crun" } - return DefaultRuntime + conf, err := config.Default() + if err != nil { + logrus.Warnf("Error loading container config when searching for local runtime: %v", err) + return DefaultRuntime + } + return conf.Engine.OCIRuntime } // StringInSlice returns a boolean indicating if the exact value s is present diff --git a/vendor/github.com/containers/image/v5/copy/copy.go b/vendor/github.com/containers/image/v5/copy/copy.go index 485db4d30..b5c755e18 100644 --- a/vendor/github.com/containers/image/v5/copy/copy.go +++ b/vendor/github.com/containers/image/v5/copy/copy.go @@ -14,6 +14,7 @@ import ( "github.com/containers/image/v5/docker/reference" "github.com/containers/image/v5/image" + internalblobinfocache "github.com/containers/image/v5/internal/blobinfocache" "github.com/containers/image/v5/internal/pkg/platform" "github.com/containers/image/v5/manifest" "github.com/containers/image/v5/pkg/blobinfocache" @@ -47,7 +48,7 @@ var ( // maxParallelDownloads is used to limit the maxmimum number of parallel // downloads. Let's follow Firefox by limiting it to 6. - maxParallelDownloads = 6 + maxParallelDownloads = uint(6) ) // compressionBufferSize is the buffer size used to compress a blob @@ -107,18 +108,19 @@ func (d *digestingReader) Read(p []byte) (int, error) { // copier allows us to keep track of diffID values for blobs, and other // data shared across one or more images in a possible manifest list. type copier struct { - dest types.ImageDestination - rawSource types.ImageSource - reportWriter io.Writer - progressOutput io.Writer - progressInterval time.Duration - progress chan types.ProgressProperties - blobInfoCache types.BlobInfoCache - copyInParallel bool - compressionFormat compression.Algorithm - compressionLevel *int - ociDecryptConfig *encconfig.DecryptConfig - ociEncryptConfig *encconfig.EncryptConfig + dest types.ImageDestination + rawSource types.ImageSource + reportWriter io.Writer + progressOutput io.Writer + progressInterval time.Duration + progress chan types.ProgressProperties + blobInfoCache internalblobinfocache.BlobInfoCache2 + copyInParallel bool + compressionFormat compression.Algorithm + compressionLevel *int + ociDecryptConfig *encconfig.DecryptConfig + ociEncryptConfig *encconfig.EncryptConfig + maxParallelDownloads uint } // imageCopier tracks state specific to a single image (possibly an item of a manifest list) @@ -190,6 +192,8 @@ type Options struct { // OciDecryptConfig contains the config that can be used to decrypt an image if it is // encrypted if non-nil. If nil, it does not attempt to decrypt an image. OciDecryptConfig *encconfig.DecryptConfig + // MaxParallelDownloads indicates the maximum layers to pull at the same time. A reasonable default is used if this is left as 0. + MaxParallelDownloads uint } // validateImageListSelection returns an error if the passed-in value is not one that we recognize as a valid ImageListSelection value @@ -265,9 +269,10 @@ func Image(ctx context.Context, policyContext *signature.PolicyContext, destRef, // FIXME? 
The cache is used for sources and destinations equally, but we only have a SourceCtx and DestinationCtx. // For now, use DestinationCtx (because blob reuse changes the behavior of the destination side more); eventually // we might want to add a separate CommonCtx — or would that be too confusing? - blobInfoCache: blobinfocache.DefaultCache(options.DestinationCtx), - ociDecryptConfig: options.OciDecryptConfig, - ociEncryptConfig: options.OciEncryptConfig, + blobInfoCache: internalblobinfocache.FromBlobInfoCache(blobinfocache.DefaultCache(options.DestinationCtx)), + ociDecryptConfig: options.OciDecryptConfig, + ociEncryptConfig: options.OciEncryptConfig, + maxParallelDownloads: options.MaxParallelDownloads, } // Default to using gzip compression unless specified otherwise. if options.DestinationCtx == nil || options.DestinationCtx.CompressionFormat == nil { @@ -648,13 +653,19 @@ func (c *copier) copyOneImage(ctx context.Context, policyContext *signature.Poli // With docker/distribution registries we do not know whether the registry accepts schema2 or schema1 only; // and at least with the OpenShift registry "acceptschema2" option, there is no way to detect the support // without actually trying to upload something and getting a types.ManifestTypeRejectedError. - // So, try the preferred manifest MIME type. If the process succeeds, fine… + // So, try the preferred manifest MIME type with possibly-updated blob digests, media types, and sizes if + // we're altering how they're compressed. If the process succeeds, fine… manifestBytes, retManifestDigest, err := ic.copyUpdatedConfigAndManifest(ctx, targetInstance) retManifestType = preferredManifestMIMEType if err != nil { logrus.Debugf("Writing manifest using preferred type %s failed: %v", preferredManifestMIMEType, err) - // … if it fails, _and_ the failure is because the manifest is rejected, we may have other options. - if _, isManifestRejected := errors.Cause(err).(types.ManifestTypeRejectedError); !isManifestRejected || len(otherManifestMIMETypeCandidates) == 0 { + // … if it fails, and the failure is either because the manifest is rejected by the registry, or + // because we failed to create a manifest of the specified type because the specific manifest type + // doesn't support the type of compression we're trying to use (e.g. docker v2s2 and zstd), we may + // have other options available that could still succeed. + _, isManifestRejected := errors.Cause(err).(types.ManifestTypeRejectedError) + _, isCompressionIncompatible := errors.Cause(err).(manifest.ManifestLayerCompressionIncompatibilityError) + if (!isManifestRejected && !isCompressionIncompatible) || len(otherManifestMIMETypeCandidates) == 0 { // We don’t have other options. // In principle the code below would handle this as well, but the resulting error message is fairly ugly. // Don’t bother the user with MIME types if we have no choice. @@ -809,7 +820,11 @@ func (ic *imageCopier) copyLayers(ctx context.Context) error { // avoid malicious images causing troubles and to be nice to servers. 
var copySemaphore *semaphore.Weighted if ic.c.copyInParallel { - copySemaphore = semaphore.NewWeighted(int64(maxParallelDownloads)) + max := ic.c.maxParallelDownloads + if max == 0 { + max = maxParallelDownloads + } + copySemaphore = semaphore.NewWeighted(int64(max)) } else { copySemaphore = semaphore.NewWeighted(int64(1)) } @@ -896,7 +911,7 @@ func (ic *imageCopier) copyLayers(ctx context.Context) error { return nil } -// layerDigestsDiffer return true iff the digests in a and b differ (ignoring sizes and possible other fields) +// layerDigestsDiffer returns true iff the digests in a and b differ (ignoring sizes and possible other fields) func layerDigestsDiffer(a, b []types.BlobInfo) bool { if len(a) != len(b) { return true @@ -951,7 +966,7 @@ func (ic *imageCopier) copyUpdatedConfigAndManifest(ctx context.Context, instanc instanceDigest = &manifestDigest } if err := ic.c.dest.PutManifest(ctx, man, instanceDigest); err != nil { - return nil, "", errors.Wrap(err, "Error writing manifest") + return nil, "", errors.Wrapf(err, "Error writing manifest %q", string(man)) } return man, manifestDigest, nil } @@ -1049,7 +1064,7 @@ type diffIDResult struct { err error } -// copyLayer copies a layer with srcInfo (with known Digest and Annotations and possibly known Size) in src to dest, perhaps compressing it if canCompress, +// copyLayer copies a layer with srcInfo (with known Digest and Annotations and possibly known Size) in src to dest, perhaps (de/re/)compressing it, // and returns a complete blobInfo of the copied layer, and a value for LayerDiffIDs if diffIDIsNeeded func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, toEncrypt bool, pool *mpb.Progress) (types.BlobInfo, digest.Digest, error) { cachedDiffID := ic.c.blobInfoCache.UncompressedDigest(srcInfo.Digest) // May be "" @@ -1058,6 +1073,12 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, to // If we already have the blob, and we don't need to compute the diffID, then we don't need to read it from the source. if !diffIDIsNeeded { + // TODO: at this point we don't know whether or not a blob we end up reusing is compressed using an algorithm + // that is acceptable for use on layers in the manifest that we'll be writing later, so if we end up reusing + // a blob that's compressed with e.g. zstd, but we're only allowed to write a v2s2 manifest, this will cause + // a failure when we eventually try to update the manifest with the digest and MIME type of the reused blob. + // Fixing that will probably require passing more information to TryReusingBlob() than the current version of + // the ImageDestination interface lets us pass in. reused, blobInfo, err := ic.c.dest.TryReusingBlob(ctx, srcInfo, ic.c.blobInfoCache, ic.canSubstituteBlobs) if err != nil { return types.BlobInfo{}, "", errors.Wrapf(err, "Error trying to reuse blob %s at destination", srcInfo.Digest) @@ -1115,7 +1136,7 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, to // copyLayerFromStream is an implementation detail of copyLayer; mostly providing a separate “defer” scope. // it copies a blob with srcInfo (with known Digest and Annotations and possibly known Size) from srcStream to dest, -// perhaps compressing the stream if canCompress, +// perhaps (de/re/)compressing the stream, // and returns a complete blobInfo of the copied blob and perhaps a <-chan diffIDResult if diffIDIsNeeded, to be read by the caller. 
func (ic *imageCopier) copyLayerFromStream(ctx context.Context, srcStream io.Reader, srcInfo types.BlobInfo, diffIDIsNeeded bool, toEncrypt bool, bar *mpb.Bar) (types.BlobInfo, <-chan diffIDResult, error) { @@ -1191,11 +1212,15 @@ func (r errorAnnotationReader) Read(b []byte) (n int, err error) { // copyBlobFromStream copies a blob with srcInfo (with known Digest and Annotations and possibly known Size) from srcStream to dest, // perhaps sending a copy to an io.Writer if getOriginalLayerCopyWriter != nil, -// perhaps compressing it if canCompress, +// perhaps (de/re/)compressing it if canModifyBlob, // and returns a complete blobInfo of the copied blob. func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, srcInfo types.BlobInfo, getOriginalLayerCopyWriter func(decompressor compression.DecompressorFunc) io.Writer, canModifyBlob bool, isConfig bool, toEncrypt bool, bar *mpb.Bar) (types.BlobInfo, error) { + if isConfig { // This is guaranteed by the caller, but set it here to be explicit. + canModifyBlob = false + } + // The copying happens through a pipeline of connected io.Readers. // === Input: srcStream @@ -1253,16 +1278,23 @@ func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, sr originalLayerReader = destStream } - desiredCompressionFormat := c.compressionFormat - // === Deal with layer compression/decompression if necessary var inputInfo types.BlobInfo var compressionOperation types.LayerCompression + uploadCompressionFormat := &c.compressionFormat + srcCompressorName := internalblobinfocache.Uncompressed + if isCompressed { + srcCompressorName = compressionFormat.Name() + } + var uploadCompressorName string if canModifyBlob && isOciEncrypted(srcInfo.MediaType) { // PreserveOriginal due to any compression not being able to be done on an encrypted blob unless decrypted logrus.Debugf("Using original blob without modification for encrypted blob") compressionOperation = types.PreserveOriginal inputInfo = srcInfo + srcCompressorName = internalblobinfocache.UnknownCompression + uploadCompressorName = internalblobinfocache.UnknownCompression + uploadCompressionFormat = nil } else if canModifyBlob && c.dest.DesiredLayerCompression() == types.Compress && !isCompressed { logrus.Debugf("Compressing blob on the fly") compressionOperation = types.Compress @@ -1272,11 +1304,12 @@ func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, sr // If this fails while writing data, it will do pipeWriter.CloseWithError(); if it fails otherwise, // e.g. because we have exited and due to pipeReader.Close() above further writing to the pipe has failed, // we don’t care. - go c.compressGoroutine(pipeWriter, destStream, desiredCompressionFormat) // Closes pipeWriter + go c.compressGoroutine(pipeWriter, destStream, *uploadCompressionFormat) // Closes pipeWriter destStream = pipeReader inputInfo.Digest = "" inputInfo.Size = -1 - } else if canModifyBlob && c.dest.DesiredLayerCompression() == types.Compress && isCompressed && desiredCompressionFormat.Name() != compressionFormat.Name() { + uploadCompressorName = uploadCompressionFormat.Name() + } else if canModifyBlob && c.dest.DesiredLayerCompression() == types.Compress && isCompressed && uploadCompressionFormat.Name() != compressionFormat.Name() { // When the blob is compressed, but the desired format is different, it first needs to be decompressed and finally // re-compressed using the desired format. 
logrus.Debugf("Blob will be converted") @@ -1291,11 +1324,12 @@ func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, sr pipeReader, pipeWriter := io.Pipe() defer pipeReader.Close() - go c.compressGoroutine(pipeWriter, s, desiredCompressionFormat) // Closes pipeWriter + go c.compressGoroutine(pipeWriter, s, *uploadCompressionFormat) // Closes pipeWriter destStream = pipeReader inputInfo.Digest = "" inputInfo.Size = -1 + uploadCompressorName = uploadCompressionFormat.Name() } else if canModifyBlob && c.dest.DesiredLayerCompression() == types.Decompress && isCompressed { logrus.Debugf("Blob will be decompressed") compressionOperation = types.Decompress @@ -1307,11 +1341,15 @@ func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, sr destStream = s inputInfo.Digest = "" inputInfo.Size = -1 + uploadCompressorName = internalblobinfocache.Uncompressed + uploadCompressionFormat = nil } else { // PreserveOriginal might also need to recompress the original blob if the desired compression format is different. logrus.Debugf("Using original blob without modification") compressionOperation = types.PreserveOriginal inputInfo = srcInfo + uploadCompressorName = srcCompressorName + uploadCompressionFormat = nil } // Perform image encryption for valid mediatypes if ociEncryptConfig provided @@ -1371,9 +1409,7 @@ func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, sr uploadedInfo.CompressionOperation = compressionOperation // If we can modify the layer's blob, set the desired algorithm for it to be set in the manifest. - if canModifyBlob && !isConfig { - uploadedInfo.CompressionAlgorithm = &desiredCompressionFormat - } + uploadedInfo.CompressionAlgorithm = uploadCompressionFormat if decrypted { uploadedInfo.CryptoOperation = types.Decrypt } else if encrypted { @@ -1390,7 +1426,7 @@ func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, sr } } - // This is fairly horrible: the writer from getOriginalLayerCopyWriter wants to consumer + // This is fairly horrible: the writer from getOriginalLayerCopyWriter wants to consume // all of the input (to compute DiffIDs), even if dest.PutBlob does not need it. // So, read everything from originalLayerReader, which will cause the rest to be // sent there if we are not already at EOF. @@ -1423,6 +1459,12 @@ func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, sr default: return types.BlobInfo{}, errors.Errorf("Internal error: Unexpected compressionOperation value %#v", compressionOperation) } + if uploadCompressorName != "" && uploadCompressorName != internalblobinfocache.UnknownCompression { + c.blobInfoCache.RecordDigestCompressorName(uploadedInfo.Digest, uploadCompressorName) + } + if srcInfo.Digest != "" && srcCompressorName != "" && srcCompressorName != internalblobinfocache.UnknownCompression { + c.blobInfoCache.RecordDigestCompressorName(srcInfo.Digest, srcCompressorName) + } } return uploadedInfo, nil } diff --git a/vendor/github.com/containers/image/v5/directory/directory_dest.go b/vendor/github.com/containers/image/v5/directory/directory_dest.go index 2b81c8360..5cafd2674 100644 --- a/vendor/github.com/containers/image/v5/directory/directory_dest.go +++ b/vendor/github.com/containers/image/v5/directory/directory_dest.go @@ -194,7 +194,9 @@ func (d *dirImageDestination) PutBlob(ctx context.Context, stream io.Reader, inp // (e.g. 
if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree). // info.Digest must not be empty. // If canSubstitute, TryReusingBlob can use an equivalent equivalent of the desired blob; in that case the returned info may not match the input. -// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size. +// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size, and may +// include CompressionOperation and CompressionAlgorithm fields to indicate that a change to the compression type should be +// reflected in the manifest that will be written. // If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure. // May use and/or update cache. func (d *dirImageDestination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) { @@ -210,7 +212,6 @@ func (d *dirImageDestination) TryReusingBlob(ctx context.Context, info types.Blo return false, types.BlobInfo{}, err } return true, types.BlobInfo{Digest: info.Digest, Size: finfo.Size()}, nil - } // PutManifest writes manifest to the destination. @@ -251,7 +252,7 @@ func pathExists(path string) (bool, error) { if err == nil { return true, nil } - if err != nil && os.IsNotExist(err) { + if os.IsNotExist(err) { return false, nil } return false, err diff --git a/vendor/github.com/containers/image/v5/docker/docker_client.go b/vendor/github.com/containers/image/v5/docker/docker_client.go index 797be45a2..be46508de 100644 --- a/vendor/github.com/containers/image/v5/docker/docker_client.go +++ b/vendor/github.com/containers/image/v5/docker/docker_client.go @@ -23,6 +23,7 @@ import ( "github.com/containers/image/v5/pkg/sysregistriesv2" "github.com/containers/image/v5/pkg/tlsclientconfig" "github.com/containers/image/v5/types" + "github.com/containers/image/v5/version" "github.com/containers/storage/pkg/homedir" clientLib "github.com/docker/distribution/registry/client" "github.com/docker/go-connections/tlsconfig" @@ -65,6 +66,8 @@ var ( {path: "/etc/containers/certs.d", absolute: true}, {path: "/etc/docker/certs.d", absolute: true}, } + + defaultUserAgent = "containers/" + version.Version + " (github.com/containers/image)" ) // extensionSignature and extensionSignatureList come from github.com/openshift/origin/pkg/dockerregistry/server/signaturedispatcher.go: @@ -92,8 +95,9 @@ type bearerToken struct { // dockerClient is configuration for dealing with a single Docker registry. type dockerClient struct { // The following members are set by newDockerClient and do not change afterwards. - sys *types.SystemContext - registry string + sys *types.SystemContext + registry string + userAgent string // tlsClientConfig is setup by newDockerClient and will be used and updated // by detectProperties(). Callers can edit tlsClientConfig.InsecureSkipVerify in the meantime. 
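The net effect of the docker_client.go hunks above and below is that the User-Agent header is resolved once in newDockerClient and attached to every request, token fetches included, instead of being added ad hoc wherever a SystemContext happened to be available. A minimal caller-side sketch, assuming the SystemContext is then handed to the usual public copy/inspect entry points:

package main

import (
	"fmt"

	"github.com/containers/image/v5/types"
)

func main() {
	sys := &types.SystemContext{
		// Hypothetical value; when left empty, the new defaultUserAgent
		// ("containers/<version> (github.com/containers/image)") is sent.
		DockerRegistryUserAgent: "my-tool/1.2 (+https://example.com)",
	}
	// sys would then be passed to the usual copy/inspect entry points; every
	// registry request, including token requests, now carries this header.
	fmt.Println(sys.DockerRegistryUserAgent)
}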
@@ -200,9 +204,7 @@ func dockerCertDir(sys *types.SystemContext, hostPort string) (string, error) { logrus.Debugf("error accessing certs directory due to permissions: %v", err) continue } - if err != nil { - return "", err - } + return "", err } return fullCertDirPath, nil } @@ -277,9 +279,15 @@ func newDockerClient(sys *types.SystemContext, registry, reference string) (*doc } tlsClientConfig.InsecureSkipVerify = skipVerify + userAgent := defaultUserAgent + if sys != nil && sys.DockerRegistryUserAgent != "" { + userAgent = sys.DockerRegistryUserAgent + } + return &dockerClient{ sys: sys, registry: registry, + userAgent: userAgent, tlsClientConfig: tlsClientConfig, }, nil } @@ -529,9 +537,7 @@ func (c *dockerClient) makeRequestToResolvedURLOnce(ctx context.Context, method, req.Header.Add(n, hh) } } - if c.sys != nil && c.sys.DockerRegistryUserAgent != "" { - req.Header.Add("User-Agent", c.sys.DockerRegistryUserAgent) - } + req.Header.Add("User-Agent", c.userAgent) if auth == v2Auth { if err := c.setupRequestAuth(req, extraScope); err != nil { return nil, err @@ -637,9 +643,7 @@ func (c *dockerClient) getBearerTokenOAuth2(ctx context.Context, challenge chall params.Add("client_id", "containers/image") authReq.Body = ioutil.NopCloser(bytes.NewBufferString(params.Encode())) - if c.sys != nil && c.sys.DockerRegistryUserAgent != "" { - authReq.Header.Add("User-Agent", c.sys.DockerRegistryUserAgent) - } + authReq.Header.Add("User-Agent", c.userAgent) authReq.Header.Add("Content-Type", "application/x-www-form-urlencoded") logrus.Debugf("%s %s", authReq.Method, authReq.URL.String()) res, err := c.client.Do(authReq) @@ -692,9 +696,7 @@ func (c *dockerClient) getBearerToken(ctx context.Context, challenge challenge, if c.auth.Username != "" && c.auth.Password != "" { authReq.SetBasicAuth(c.auth.Username, c.auth.Password) } - if c.sys != nil && c.sys.DockerRegistryUserAgent != "" { - authReq.Header.Add("User-Agent", c.sys.DockerRegistryUserAgent) - } + authReq.Header.Add("User-Agent", c.userAgent) logrus.Debugf("%s %s", authReq.Method, authReq.URL.String()) res, err := c.client.Do(authReq) diff --git a/vendor/github.com/containers/image/v5/docker/docker_image_dest.go b/vendor/github.com/containers/image/v5/docker/docker_image_dest.go index ac63ac121..842dcfba6 100644 --- a/vendor/github.com/containers/image/v5/docker/docker_image_dest.go +++ b/vendor/github.com/containers/image/v5/docker/docker_image_dest.go @@ -15,6 +15,7 @@ import ( "strings" "github.com/containers/image/v5/docker/reference" + "github.com/containers/image/v5/internal/blobinfocache" "github.com/containers/image/v5/internal/iolimits" "github.com/containers/image/v5/internal/uploadreader" "github.com/containers/image/v5/manifest" @@ -284,7 +285,9 @@ func (d *dockerImageDestination) mountBlob(ctx context.Context, srcRepo referenc // (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree). // info.Digest must not be empty. // If canSubstitute, TryReusingBlob can use an equivalent equivalent of the desired blob; in that case the returned info may not match the input. -// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size. 
+// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size, and may +// include CompressionOperation and CompressionAlgorithm fields to indicate that a change to the compression type should be +// reflected in the manifest that will be written. // If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure. // May use and/or update cache. func (d *dockerImageDestination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) { @@ -299,17 +302,23 @@ func (d *dockerImageDestination) TryReusingBlob(ctx context.Context, info types. } if exists { cache.RecordKnownLocation(d.ref.Transport(), bicTransportScope(d.ref), info.Digest, newBICLocationReference(d.ref)) - return true, types.BlobInfo{Digest: info.Digest, Size: size}, nil + return true, types.BlobInfo{Digest: info.Digest, MediaType: info.MediaType, Size: size}, nil } // Then try reusing blobs from other locations. - for _, candidate := range cache.CandidateLocations(d.ref.Transport(), bicTransportScope(d.ref), info.Digest, canSubstitute) { + bic := blobinfocache.FromBlobInfoCache(cache) + candidates := bic.CandidateLocations2(d.ref.Transport(), bicTransportScope(d.ref), info.Digest, canSubstitute) + for _, candidate := range candidates { candidateRepo, err := parseBICLocationReference(candidate.Location) if err != nil { logrus.Debugf("Error parsing BlobInfoCache location reference: %s", err) continue } - logrus.Debugf("Trying to reuse cached location %s in %s", candidate.Digest.String(), candidateRepo.Name()) + if candidate.CompressorName != blobinfocache.Uncompressed { + logrus.Debugf("Trying to reuse cached location %s compressed with %s in %s", candidate.Digest.String(), candidate.CompressorName, candidateRepo.Name()) + } else { + logrus.Debugf("Trying to reuse cached location %s with no compression in %s", candidate.Digest.String(), candidateRepo.Name()) + } // Sanity checks: if reference.Domain(candidateRepo) != reference.Domain(d.ref.ref) { @@ -351,8 +360,16 @@ func (d *dockerImageDestination) TryReusingBlob(ctx context.Context, info types. continue } } - cache.RecordKnownLocation(d.ref.Transport(), bicTransportScope(d.ref), candidate.Digest, newBICLocationReference(d.ref)) - return true, types.BlobInfo{Digest: candidate.Digest, Size: size}, nil + + bic.RecordKnownLocation(d.ref.Transport(), bicTransportScope(d.ref), candidate.Digest, newBICLocationReference(d.ref)) + + compressionOperation, compressionAlgorithm, err := blobinfocache.OperationAndAlgorithmForCompressor(candidate.CompressorName) + if err != nil { + logrus.Debugf("... 
Failed: %v", err) + continue + } + + return true, types.BlobInfo{Digest: candidate.Digest, MediaType: info.MediaType, Size: size, CompressionOperation: compressionOperation, CompressionAlgorithm: compressionAlgorithm}, nil } return false, types.BlobInfo{}, nil diff --git a/vendor/github.com/containers/image/v5/docker/docker_image_src.go b/vendor/github.com/containers/image/v5/docker/docker_image_src.go index 70ca7661e..bff950bb0 100644 --- a/vendor/github.com/containers/image/v5/docker/docker_image_src.go +++ b/vendor/github.com/containers/image/v5/docker/docker_image_src.go @@ -64,6 +64,11 @@ func newImageSource(ctx context.Context, sys *types.SystemContext, ref dockerRef } attempts := []attempt{} for _, pullSource := range pullSources { + if sys != nil && sys.DockerLogMirrorChoice { + logrus.Infof("Trying to access %q", pullSource.Reference) + } else { + logrus.Debugf("Trying to access %q", pullSource.Reference) + } logrus.Debugf("Trying to access %q", pullSource.Reference) s, err := newImageSourceAttempt(ctx, sys, ref, pullSource) if err == nil { diff --git a/vendor/github.com/containers/image/v5/docker/internal/tarfile/dest.go b/vendor/github.com/containers/image/v5/docker/internal/tarfile/dest.go index 41d2c5e81..9559dfb56 100644 --- a/vendor/github.com/containers/image/v5/docker/internal/tarfile/dest.go +++ b/vendor/github.com/containers/image/v5/docker/internal/tarfile/dest.go @@ -159,7 +159,9 @@ func (d *Destination) PutBlob(ctx context.Context, stream io.Reader, inputInfo t // (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree). // info.Digest must not be empty. // If canSubstitute, TryReusingBlob can use an equivalent equivalent of the desired blob; in that case the returned info may not match the input. -// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size. +// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size, and may +// include CompressionOperation and CompressionAlgorithm fields to indicate that a change to the compression type should be +// reflected in the manifest that will be written. // If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure. // May use and/or update cache. func (d *Destination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) { diff --git a/vendor/github.com/containers/image/v5/docker/lookaside.go b/vendor/github.com/containers/image/v5/docker/lookaside.go index 06d616d01..0d5d8d82a 100644 --- a/vendor/github.com/containers/image/v5/docker/lookaside.go +++ b/vendor/github.com/containers/image/v5/docker/lookaside.go @@ -21,7 +21,7 @@ import ( // systemRegistriesDirPath is the path to registries.d, used for locating lookaside Docker signature storage. // You can override this at build time with -// -ldflags '-X github.com/containers/image/docker.systemRegistriesDirPath=$your_path' +// -ldflags '-X github.com/containers/image/v5/docker.systemRegistriesDirPath=$your_path' var systemRegistriesDirPath = builtinRegistriesDirPath // builtinRegistriesDirPath is the path to registries.d. 
@@ -96,10 +96,16 @@ func SignatureStorageBaseURL(sys *types.SystemContext, ref types.ImageReference, // registriesDirPath returns a path to registries.d func registriesDirPath(sys *types.SystemContext) string { + return registriesDirPathWithHomeDir(sys, homedir.Get()) +} + +// registriesDirPathWithHomeDir is an internal implementation detail of registriesDirPath, +// it exists only to allow testing it with an artificial home directory. +func registriesDirPathWithHomeDir(sys *types.SystemContext, homeDir string) string { if sys != nil && sys.RegistriesDirPath != "" { return sys.RegistriesDirPath } - userRegistriesDirPath := filepath.Join(homedir.Get(), userRegistriesDir) + userRegistriesDirPath := filepath.Join(homeDir, userRegistriesDir) if _, err := os.Stat(userRegistriesDirPath); err == nil { return userRegistriesDirPath } diff --git a/vendor/github.com/containers/image/v5/docker/policyconfiguration/naming.go b/vendor/github.com/containers/image/v5/docker/policyconfiguration/naming.go index 61d9aab9a..94e9e5f23 100644 --- a/vendor/github.com/containers/image/v5/docker/policyconfiguration/naming.go +++ b/vendor/github.com/containers/image/v5/docker/policyconfiguration/naming.go @@ -52,5 +52,26 @@ func DockerReferenceNamespaces(ref reference.Named) []string { } name = name[:lastSlash] } + + // Strip port number if any, before appending to res slice. + // Currently, the most compatible behavior is to return + // example.com:8443/ns, example.com:8443, *.com. + // If a port number is not specified, the expected behavior would be + // example.com/ns, example.com, *.com + portNumColon := strings.Index(name, ":") + if portNumColon != -1 { + name = name[:portNumColon] + } + + // Append wildcarded domains to res slice + for { + firstDot := strings.Index(name, ".") + if firstDot == -1 { + break + } + name = name[firstDot+1:] + + res = append(res, "*."+name) + } return res } diff --git a/vendor/github.com/containers/image/v5/docker/tarfile/dest.go b/vendor/github.com/containers/image/v5/docker/tarfile/dest.go index e16829d96..4f2465cac 100644 --- a/vendor/github.com/containers/image/v5/docker/tarfile/dest.go +++ b/vendor/github.com/containers/image/v5/docker/tarfile/dest.go @@ -86,7 +86,9 @@ func (d *Destination) PutBlob(ctx context.Context, stream io.Reader, inputInfo t // (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree). // info.Digest must not be empty. // If canSubstitute, TryReusingBlob can use an equivalent equivalent of the desired blob; in that case the returned info may not match the input. -// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size. +// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size, and may +// include CompressionOperation and CompressionAlgorithm fields to indicate that a change to the compression type should be +// reflected in the manifest that will be written. // If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure. // May use and/or update cache. 
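The port-stripping and wildcard loop added to DockerReferenceNamespaces above can be restated on its own: for a host of registry.example.com:8443 it yields *.example.com and *.com after the exact-match namespaces. A standalone sketch of just that step (the vendored function also prepends the repository and its parent namespaces):

package main

import (
	"fmt"
	"strings"
)

// wildcardNamespaces restates just the added step: drop any port from the
// registry host, then emit "*."-prefixed parent domains.
func wildcardNamespaces(host string) []string {
	if colon := strings.Index(host, ":"); colon != -1 {
		host = host[:colon]
	}
	var res []string
	for {
		dot := strings.Index(host, ".")
		if dot == -1 {
			break
		}
		host = host[dot+1:]
		res = append(res, "*."+host)
	}
	return res
}

func main() {
	fmt.Println(wildcardNamespaces("registry.example.com:8443"))
	// prints: [*.example.com *.com]
}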
func (d *Destination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) { diff --git a/vendor/github.com/containers/image/v5/image/docker_schema2.go b/vendor/github.com/containers/image/v5/image/docker_schema2.go index e4e01d5d9..61ca83364 100644 --- a/vendor/github.com/containers/image/v5/image/docker_schema2.go +++ b/vendor/github.com/containers/image/v5/image/docker_schema2.go @@ -154,6 +154,9 @@ func (m *manifestSchema2) UpdatedImageNeedsLayerDiffIDs(options types.ManifestUp // UpdatedImage returns a types.Image modified according to options. // This does not change the state of the original Image object. +// The returned error will be a manifest.ManifestLayerCompressionIncompatibilityError +// if the CompressionOperation and CompressionAlgorithm specified in one or more +// options.LayerInfos items is anything other than gzip. func (m *manifestSchema2) UpdatedImage(ctx context.Context, options types.ManifestUpdateOptions) (types.Image, error) { copy := manifestSchema2{ // NOTE: This is not a deep copy, it still shares slices etc. src: m.src, diff --git a/vendor/github.com/containers/image/v5/image/oci.go b/vendor/github.com/containers/image/v5/image/oci.go index 5cb04f979..58e9c03ba 100644 --- a/vendor/github.com/containers/image/v5/image/oci.go +++ b/vendor/github.com/containers/image/v5/image/oci.go @@ -134,6 +134,10 @@ func (m *manifestOCI1) UpdatedImageNeedsLayerDiffIDs(options types.ManifestUpdat // UpdatedImage returns a types.Image modified according to options. // This does not change the state of the original Image object. +// The returned error will be a manifest.ManifestLayerCompressionIncompatibilityError +// if the combination of CompressionOperation and CompressionAlgorithm specified +// in one or more options.LayerInfos items indicates that a layer is compressed using +// an algorithm that is not allowed in OCI. func (m *manifestOCI1) UpdatedImage(ctx context.Context, options types.ManifestUpdateOptions) (types.Image, error) { copy := manifestOCI1{ // NOTE: This is not a deep copy, it still shares slices etc. src: m.src, diff --git a/vendor/github.com/containers/image/v5/internal/blobinfocache/blobinfocache.go b/vendor/github.com/containers/image/v5/internal/blobinfocache/blobinfocache.go new file mode 100644 index 000000000..1dceaa669 --- /dev/null +++ b/vendor/github.com/containers/image/v5/internal/blobinfocache/blobinfocache.go @@ -0,0 +1,63 @@ +package blobinfocache + +import ( + "github.com/containers/image/v5/pkg/compression" + "github.com/containers/image/v5/types" + digest "github.com/opencontainers/go-digest" +) + +// FromBlobInfoCache returns a BlobInfoCache2 based on a BlobInfoCache, returning the original +// object if it implements BlobInfoCache2, or a wrapper which discards compression information +// if it only implements BlobInfoCache. 
+func FromBlobInfoCache(bic types.BlobInfoCache) BlobInfoCache2 { + if bic2, ok := bic.(BlobInfoCache2); ok { + return bic2 + } + return &v1OnlyBlobInfoCache{ + BlobInfoCache: bic, + } +} + +type v1OnlyBlobInfoCache struct { + types.BlobInfoCache +} + +func (bic *v1OnlyBlobInfoCache) RecordDigestCompressorName(anyDigest digest.Digest, compressorName string) { +} + +func (bic *v1OnlyBlobInfoCache) CandidateLocations2(transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest, canSubstitute bool) []BICReplacementCandidate2 { + return nil +} + +// CandidateLocationsFromV2 converts a slice of BICReplacementCandidate2 to a slice of +// types.BICReplacementCandidate, dropping compression information. +func CandidateLocationsFromV2(v2candidates []BICReplacementCandidate2) []types.BICReplacementCandidate { + candidates := make([]types.BICReplacementCandidate, 0, len(v2candidates)) + for _, c := range v2candidates { + candidates = append(candidates, types.BICReplacementCandidate{ + Digest: c.Digest, + Location: c.Location, + }) + } + return candidates +} + +// OperationAndAlgorithmForCompressor returns CompressionOperation and CompressionAlgorithm +// values suitable for inclusion in a types.BlobInfo structure, based on the name of the +// compression algorithm, or Uncompressed, or UnknownCompression. This is typically used by +// TryReusingBlob() implementations to set values in the BlobInfo structure that they return +// upon success. +func OperationAndAlgorithmForCompressor(compressorName string) (types.LayerCompression, *compression.Algorithm, error) { + switch compressorName { + case Uncompressed: + return types.Decompress, nil, nil + case UnknownCompression: + return types.PreserveOriginal, nil, nil + default: + algo, err := compression.AlgorithmByName(compressorName) + if err == nil { + return types.Compress, &algo, nil + } + return types.PreserveOriginal, nil, err + } +} diff --git a/vendor/github.com/containers/image/v5/internal/blobinfocache/types.go b/vendor/github.com/containers/image/v5/internal/blobinfocache/types.go new file mode 100644 index 000000000..3c2be57f3 --- /dev/null +++ b/vendor/github.com/containers/image/v5/internal/blobinfocache/types.go @@ -0,0 +1,45 @@ +package blobinfocache + +import ( + "github.com/containers/image/v5/types" + digest "github.com/opencontainers/go-digest" +) + +const ( + // Uncompressed is the value we store in a blob info cache to indicate that we know that + // the blob in the corresponding location is not compressed. + Uncompressed = "uncompressed" + // UnknownCompression is the value we store in a blob info cache to indicate that we don't + // know if the blob in the corresponding location is compressed (and if so, how) or not. + UnknownCompression = "unknown" +) + +// BlobInfoCache2 extends BlobInfoCache by adding the ability to track information about what kind +// of compression was applied to the blobs it keeps information about. +type BlobInfoCache2 interface { + types.BlobInfoCache + // RecordDigestCompressorName records a compressor for the blob with the specified digest, + // or Uncompressed or UnknownCompression. + // WARNING: Only call this with LOCALLY VERIFIED data; don’t record a compressor for a + // digest just because some remote author claims so (e.g. because a manifest says so); + // otherwise the cache could be poisoned and cause us to make incorrect edits to type + // information in a manifest. 
+ RecordDigestCompressorName(anyDigest digest.Digest, compressorName string) + // CandidateLocations2 returns a prioritized, limited, number of blobs and their locations + // that could possibly be reused within the specified (transport scope) (if they still + // exist, which is not guaranteed). + // + // If !canSubstitute, the returned cadidates will match the submitted digest exactly; if + // canSubstitute, data from previous RecordDigestUncompressedPair calls is used to also look + // up variants of the blob which have the same uncompressed digest. + // + // The CompressorName fields in returned data must never be UnknownCompression. + CandidateLocations2(transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest, canSubstitute bool) []BICReplacementCandidate2 +} + +// BICReplacementCandidate2 is an item returned by BlobInfoCache2.CandidateLocations2. +type BICReplacementCandidate2 struct { + Digest digest.Digest + CompressorName string // either the Name() of a known pkg/compression.Algorithm, or Uncompressed or UnknownCompression + Location types.BICLocationReference +} diff --git a/vendor/github.com/containers/image/v5/internal/tmpdir/tmpdir.go b/vendor/github.com/containers/image/v5/internal/tmpdir/tmpdir.go index a3081f4f2..809446e18 100644 --- a/vendor/github.com/containers/image/v5/internal/tmpdir/tmpdir.go +++ b/vendor/github.com/containers/image/v5/internal/tmpdir/tmpdir.go @@ -9,7 +9,7 @@ import ( // unixTempDirForBigFiles is the directory path to store big files on non Windows systems. // You can override this at build time with -// -ldflags '-X github.com/containers/image/internal/tmpdir.unixTempDirForBigFiles=$your_path' +// -ldflags '-X github.com/containers/image/v5/internal/tmpdir.unixTempDirForBigFiles=$your_path' var unixTempDirForBigFiles = builtinUnixTempDirForBigFiles // builtinUnixTempDirForBigFiles is the directory path to store big files. diff --git a/vendor/github.com/containers/image/v5/manifest/common.go b/vendor/github.com/containers/image/v5/manifest/common.go index fa2b39e0e..3ece948a0 100644 --- a/vendor/github.com/containers/image/v5/manifest/common.go +++ b/vendor/github.com/containers/image/v5/manifest/common.go @@ -5,7 +5,6 @@ import ( "github.com/containers/image/v5/pkg/compression" "github.com/containers/image/v5/types" - "github.com/pkg/errors" "github.com/sirupsen/logrus" ) @@ -54,6 +53,12 @@ const mtsUnsupportedMIMEType = "" // A value in compressionMIMETypeSet that mean // compressionVariantMIMEType returns a variant of mimeType for the specified algorithm (which may be nil // to mean "no compression"), based on variantTable. +// The returned error will be a ManifestLayerCompressionIncompatibilityError if mimeType has variants +// that differ only in what type of compression is applied, but it can't be combined with this +// algorithm to produce an updated MIME type that complies with the standard that defines mimeType. +// If the compression algorithm is unrecognized, or mimeType is not known to have variants that +// differ from it only in what type of compression has been applied, the returned error will not be +// a ManifestLayerCompressionIncompatibilityError. 
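The pattern the transports in this diff follow with the new internal/blobinfocache package is to accept a plain types.BlobInfoCache from the caller and upgrade it on the spot, degrading gracefully when only the v1 interface is implemented. A module-internal sketch (the package is internal, so it cannot be imported from outside containers/image); the "gzip" literal stands in for a locally verified compressor name:

package sketch

import (
	"github.com/containers/image/v5/internal/blobinfocache"
	"github.com/containers/image/v5/types"
	digest "github.com/opencontainers/go-digest"
)

// recordAndQuery upgrades a caller-supplied v1 cache and uses the v2 methods;
// with a v1-only cache the record call is a no-op and the query returns nil.
func recordAndQuery(cache types.BlobInfoCache, transport types.ImageTransport, scope types.BICTransportScope, d digest.Digest) []blobinfocache.BICReplacementCandidate2 {
	bic := blobinfocache.FromBlobInfoCache(cache)
	// Only record locally verified information, per the interface's warning.
	bic.RecordDigestCompressorName(d, "gzip")
	return bic.CandidateLocations2(transport, scope, d, true)
}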
func compressionVariantMIMEType(variantTable []compressionMIMETypeSet, mimeType string, algorithm *compression.Algorithm) (string, error) { if mimeType == mtsUnsupportedMIMEType { // Prevent matching against the {algo:mtsUnsupportedMIMEType} entries return "", fmt.Errorf("cannot update unknown MIME type") @@ -70,15 +75,15 @@ func compressionVariantMIMEType(variantTable []compressionMIMETypeSet, mimeType return res, nil } if name != mtsUncompressed { - return "", fmt.Errorf("%s compression is not supported", name) + return "", ManifestLayerCompressionIncompatibilityError{fmt.Sprintf("%s compression is not supported for type %q", name, mt)} } - return "", errors.New("uncompressed variant is not supported") + return "", ManifestLayerCompressionIncompatibilityError{fmt.Sprintf("uncompressed variant is not supported for type %q", mt)} } if name != mtsUncompressed { - return "", fmt.Errorf("unknown compression algorithm %s", name) + return "", ManifestLayerCompressionIncompatibilityError{fmt.Sprintf("unknown compressed with algorithm %s variant for type %s", name, mt)} } // We can't very well say “the idea of no compression is unknown” - return "", errors.New("uncompressed variant is not supported") + return "", ManifestLayerCompressionIncompatibilityError{fmt.Sprintf("uncompressed variant is not supported for type %q", mt)} } } } @@ -89,7 +94,11 @@ func compressionVariantMIMEType(variantTable []compressionMIMETypeSet, mimeType } // updatedMIMEType returns the result of applying edits in updated (MediaType, CompressionOperation) to -// mimeType, based on variantTable. It may use updated.Digest for error messages. +// mimeType, based on variantTable. It may use updated.Digest for error messages. +// The returned error will be a ManifestLayerCompressionIncompatibilityError if mimeType has variants +// that differ only in what type of compression is applied, but applying updated.CompressionOperation +// and updated.CompressionAlgorithm to it won't produce an updated MIME type that complies with the +// standard that defines mimeType. func updatedMIMEType(variantTable []compressionMIMETypeSet, mimeType string, updated types.BlobInfo) (string, error) { // Note that manifests in containers-storage might be reporting the // wrong media type since the original manifests are stored while layers @@ -99,6 +108,12 @@ func updatedMIMEType(variantTable []compressionMIMETypeSet, mimeType string, upd // {de}compressed. switch updated.CompressionOperation { case types.PreserveOriginal: + // Force a change to the media type if we're being told to use a particular compressor, + // since it might be different from the one associated with the media type. Otherwise, + // try to keep the original media type. + if updated.CompressionAlgorithm != nil { + return compressionVariantMIMEType(variantTable, mimeType, updated.CompressionAlgorithm) + } // Keep the original media type. return mimeType, nil @@ -116,3 +131,14 @@ func updatedMIMEType(variantTable []compressionMIMETypeSet, mimeType string, upd return "", fmt.Errorf("unknown compression operation (%d)", updated.CompressionOperation) } } + +// ManifestLayerCompressionIncompatibilityError indicates that a specified compression algorithm +// could not be applied to a layer MIME type. A caller that receives this should either retry +// the call with a different compression algorithm, or attempt to use a different manifest type. 
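A caller-side sketch of acting on that advice, assuming the error reaches the caller either directly or wrapped in a way errors.As can unwrap:

package sketch

import (
	"errors"

	"github.com/containers/image/v5/manifest"
)

// isCompressionIncompatibility reports whether err is (or wraps) the new
// ManifestLayerCompressionIncompatibilityError.
func isCompressionIncompatibility(err error) bool {
	var incompat manifest.ManifestLayerCompressionIncompatibilityError
	return errors.As(err, &incompat)
}

A copy loop could use this to retry with gzip, or to fall back to a manifest format that allows the requested algorithm, rather than failing outright.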
+type ManifestLayerCompressionIncompatibilityError struct { + text string +} + +func (m ManifestLayerCompressionIncompatibilityError) Error() string { + return m.text +} diff --git a/vendor/github.com/containers/image/v5/manifest/docker_schema2.go b/vendor/github.com/containers/image/v5/manifest/docker_schema2.go index 8d8bb9e01..6cb605263 100644 --- a/vendor/github.com/containers/image/v5/manifest/docker_schema2.go +++ b/vendor/github.com/containers/image/v5/manifest/docker_schema2.go @@ -226,6 +226,8 @@ var schema2CompressionMIMETypeSets = []compressionMIMETypeSet{ } // UpdateLayerInfos replaces the original layers with the specified BlobInfos (size+digest+urls), in order (the root layer first, and then successive layered layers) +// The returned error will be a manifest.ManifestLayerCompressionIncompatibilityError if any of the layerInfos includes a combination of CompressionOperation and +// CompressionAlgorithm that would result in anything other than gzip compression. func (m *Schema2) UpdateLayerInfos(layerInfos []types.BlobInfo) error { if len(m.LayersDescriptors) != len(layerInfos) { return errors.Errorf("Error preparing updated manifest: layer count changed from %d to %d", len(m.LayersDescriptors), len(layerInfos)) diff --git a/vendor/github.com/containers/image/v5/manifest/oci.go b/vendor/github.com/containers/image/v5/manifest/oci.go index 292614593..c6299d8e6 100644 --- a/vendor/github.com/containers/image/v5/manifest/oci.go +++ b/vendor/github.com/containers/image/v5/manifest/oci.go @@ -108,6 +108,8 @@ var oci1CompressionMIMETypeSets = []compressionMIMETypeSet{ } // UpdateLayerInfos replaces the original layers with the specified BlobInfos (size+digest+urls+mediatype), in order (the root layer first, and then successive layered layers) +// The returned error will be a manifest.ManifestLayerCompressionIncompatibilityError if any of the layerInfos includes a combination of CompressionOperation and +// CompressionAlgorithm that isn't supported by OCI. func (m *OCI1) UpdateLayerInfos(layerInfos []types.BlobInfo) error { if len(m.Layers) != len(layerInfos) { return errors.Errorf("Error preparing updated manifest: layer count changed from %d to %d", len(m.Layers), len(layerInfos)) diff --git a/vendor/github.com/containers/image/v5/oci/archive/oci_dest.go b/vendor/github.com/containers/image/v5/oci/archive/oci_dest.go index 23d471325..c874eb775 100644 --- a/vendor/github.com/containers/image/v5/oci/archive/oci_dest.go +++ b/vendor/github.com/containers/image/v5/oci/archive/oci_dest.go @@ -103,7 +103,9 @@ func (d *ociArchiveImageDestination) PutBlob(ctx context.Context, stream io.Read // (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree). // info.Digest must not be empty. // If canSubstitute, TryReusingBlob can use an equivalent equivalent of the desired blob; in that case the returned info may not match the input. -// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size. +// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size, and may +// include CompressionOperation and CompressionAlgorithm fields to indicate that a change to the compression type should be +// reflected in the manifest that will be written. // If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure. 
// May use and/or update cache. func (d *ociArchiveImageDestination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) { diff --git a/vendor/github.com/containers/image/v5/oci/layout/oci_dest.go b/vendor/github.com/containers/image/v5/oci/layout/oci_dest.go index 0c88e1ef0..1230e8ca3 100644 --- a/vendor/github.com/containers/image/v5/oci/layout/oci_dest.go +++ b/vendor/github.com/containers/image/v5/oci/layout/oci_dest.go @@ -186,7 +186,9 @@ func (d *ociImageDestination) PutBlob(ctx context.Context, stream io.Reader, inp // (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree). // info.Digest must not be empty. // If canSubstitute, TryReusingBlob can use an equivalent equivalent of the desired blob; in that case the returned info may not match the input. -// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size. +// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size, and may +// include CompressionOperation and CompressionAlgorithm fields to indicate that a change to the compression type should be +// reflected in the manifest that will be written. // If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure. // May use and/or update cache. func (d *ociImageDestination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) { @@ -204,6 +206,7 @@ func (d *ociImageDestination) TryReusingBlob(ctx context.Context, info types.Blo if err != nil { return false, types.BlobInfo{}, err } + return true, types.BlobInfo{Digest: info.Digest, Size: finfo.Size()}, nil } diff --git a/vendor/github.com/containers/image/v5/openshift/openshift.go b/vendor/github.com/containers/image/v5/openshift/openshift.go index c4c84dd54..426046e66 100644 --- a/vendor/github.com/containers/image/v5/openshift/openshift.go +++ b/vendor/github.com/containers/image/v5/openshift/openshift.go @@ -410,7 +410,9 @@ func (d *openshiftImageDestination) PutBlob(ctx context.Context, stream io.Reade // (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree). // info.Digest must not be empty. // If canSubstitute, TryReusingBlob can use an equivalent equivalent of the desired blob; in that case the returned info may not match the input. -// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size. +// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size, and may +// include CompressionOperation and CompressionAlgorithm fields to indicate that a change to the compression type should be +// reflected in the manifest that will be written. // If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure. // May use and/or update cache. 
func (d *openshiftImageDestination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) { diff --git a/vendor/github.com/containers/image/v5/ostree/ostree_dest.go b/vendor/github.com/containers/image/v5/ostree/ostree_dest.go index b518122e2..c91a49c57 100644 --- a/vendor/github.com/containers/image/v5/ostree/ostree_dest.go +++ b/vendor/github.com/containers/image/v5/ostree/ostree_dest.go @@ -339,7 +339,9 @@ func (d *ostreeImageDestination) importConfig(repo *otbuiltin.Repo, blob *blobTo // (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree). // info.Digest must not be empty. // If canSubstitute, TryReusingBlob can use an equivalent equivalent of the desired blob; in that case the returned info may not match the input. -// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size. +// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size, and may +// include CompressionOperation and CompressionAlgorithm fields to indicate that a change to the compression type should be +// reflected in the manifest that will be written. // If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure. // May use and/or update cache. func (d *ostreeImageDestination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) { diff --git a/vendor/github.com/containers/image/v5/pkg/blobinfocache/boltdb/boltdb.go b/vendor/github.com/containers/image/v5/pkg/blobinfocache/boltdb/boltdb.go index 200dab593..2c211b8b8 100644 --- a/vendor/github.com/containers/image/v5/pkg/blobinfocache/boltdb/boltdb.go +++ b/vendor/github.com/containers/image/v5/pkg/blobinfocache/boltdb/boltdb.go @@ -7,6 +7,7 @@ import ( "sync" "time" + "github.com/containers/image/v5/internal/blobinfocache" "github.com/containers/image/v5/pkg/blobinfocache/internal/prioritize" "github.com/containers/image/v5/types" "github.com/opencontainers/go-digest" @@ -22,6 +23,9 @@ var ( // uncompressedDigestBucket stores a mapping from any digest to an uncompressed digest. uncompressedDigestBucket = []byte("uncompressedDigest") + // digestCompressorBucket stores a mapping from any digest to a compressor, or blobinfocache.Uncompressed + // It may not exist in caches created by older versions, even if uncompressedDigestBucket is present. + digestCompressorBucket = []byte("digestCompressor") // digestByUncompressedBucket stores a bucket per uncompressed digest, with the bucket containing a set of digests for that uncompressed digest // (as a set of key=digest, value="" pairs) digestByUncompressedBucket = []byte("digestByUncompressed") @@ -95,6 +99,9 @@ type cache struct { // // Most users should call blobinfocache.DefaultCache instead. func New(path string) types.BlobInfoCache { + return new2(path) +} +func new2(path string) *cache { return &cache{path: path} } @@ -220,6 +227,30 @@ func (bdc *cache) RecordDigestUncompressedPair(anyDigest digest.Digest, uncompre }) // FIXME? Log error (but throttle the log volume on repeated accesses)? } +// RecordDigestCompressorName records that the blob with digest anyDigest was compressed with the specified +// compressor, or is blobinfocache.Uncompressed. 
+// WARNING: Only call this for LOCALLY VERIFIED data; don’t record a digest pair just because some remote author claims so (e.g. +// because a manifest/config pair exists); otherwise the cache could be poisoned and allow substituting unexpected blobs. +// (Eventually, the DiffIDs in image config could detect the substitution, but that may be too late, and not all image formats contain that data.) +func (bdc *cache) RecordDigestCompressorName(anyDigest digest.Digest, compressorName string) { + _ = bdc.update(func(tx *bolt.Tx) error { + b, err := tx.CreateBucketIfNotExists(digestCompressorBucket) + if err != nil { + return err + } + key := []byte(anyDigest.String()) + if previousBytes := b.Get(key); previousBytes != nil { + if string(previousBytes) != compressorName { + logrus.Warnf("Compressor for blob with digest %s previously recorded as %s, now %s", anyDigest, string(previousBytes), compressorName) + } + } + if compressorName == blobinfocache.UnknownCompression { + return b.Delete(key) + } + return b.Put(key, []byte(compressorName)) + }) // FIXME? Log error (but throttle the log volume on repeated accesses)? +} + // RecordKnownLocation records that a blob with the specified digest exists within the specified (transport, scope) scope, // and can be reused given the opaque location data. func (bdc *cache) RecordKnownLocation(transport types.ImageTransport, scope types.BICTransportScope, blobDigest digest.Digest, location types.BICLocationReference) { @@ -251,21 +282,34 @@ func (bdc *cache) RecordKnownLocation(transport types.ImageTransport, scope type }) // FIXME? Log error (but throttle the log volume on repeated accesses)? } -// appendReplacementCandiates creates prioritize.CandidateWithTime values for digest in scopeBucket, and returns the result of appending them to candidates. -func (bdc *cache) appendReplacementCandidates(candidates []prioritize.CandidateWithTime, scopeBucket *bolt.Bucket, digest digest.Digest) []prioritize.CandidateWithTime { - b := scopeBucket.Bucket([]byte(digest.String())) +// appendReplacementCandiates creates prioritize.CandidateWithTime values for digest in scopeBucket with corresponding compression info from compressionBucket (if compressionBucket is not nil), and returns the result of appending them to candidates. 
+func (bdc *cache) appendReplacementCandidates(candidates []prioritize.CandidateWithTime, scopeBucket, compressionBucket *bolt.Bucket, digest digest.Digest, requireCompressionInfo bool) []prioritize.CandidateWithTime { + digestKey := []byte(digest.String()) + b := scopeBucket.Bucket(digestKey) if b == nil { return candidates } + compressorName := blobinfocache.UnknownCompression + if compressionBucket != nil { + // the bucket won't exist if the cache was created by a v1 implementation and + // hasn't yet been updated by a v2 implementation + if compressorNameValue := compressionBucket.Get(digestKey); len(compressorNameValue) > 0 { + compressorName = string(compressorNameValue) + } + } + if compressorName == blobinfocache.UnknownCompression && requireCompressionInfo { + return candidates + } _ = b.ForEach(func(k, v []byte) error { t := time.Time{} if err := t.UnmarshalBinary(v); err != nil { return err } candidates = append(candidates, prioritize.CandidateWithTime{ - Candidate: types.BICReplacementCandidate{ - Digest: digest, - Location: types.BICLocationReference{Opaque: string(k)}, + Candidate: blobinfocache.BICReplacementCandidate2{ + Digest: digest, + CompressorName: compressorName, + Location: types.BICLocationReference{Opaque: string(k)}, }, LastSeen: t, }) @@ -274,13 +318,17 @@ func (bdc *cache) appendReplacementCandidates(candidates []prioritize.CandidateW return candidates } -// CandidateLocations returns a prioritized, limited, number of blobs and their locations that could possibly be reused +// CandidateLocations2 returns a prioritized, limited, number of blobs and their locations that could possibly be reused // within the specified (transport scope) (if they still exist, which is not guaranteed). // // If !canSubstitute, the returned cadidates will match the submitted digest exactly; if canSubstitute, // data from previous RecordDigestUncompressedPair calls is used to also look up variants of the blob which have the same // uncompressed digest. 
-func (bdc *cache) CandidateLocations(transport types.ImageTransport, scope types.BICTransportScope, primaryDigest digest.Digest, canSubstitute bool) []types.BICReplacementCandidate { +func (bdc *cache) CandidateLocations2(transport types.ImageTransport, scope types.BICTransportScope, primaryDigest digest.Digest, canSubstitute bool) []blobinfocache.BICReplacementCandidate2 { + return bdc.candidateLocations(transport, scope, primaryDigest, canSubstitute, true) +} + +func (bdc *cache) candidateLocations(transport types.ImageTransport, scope types.BICTransportScope, primaryDigest digest.Digest, canSubstitute, requireCompressionInfo bool) []blobinfocache.BICReplacementCandidate2 { res := []prioritize.CandidateWithTime{} var uncompressedDigestValue digest.Digest // = "" if err := bdc.view(func(tx *bolt.Tx) error { @@ -296,8 +344,11 @@ func (bdc *cache) CandidateLocations(transport types.ImageTransport, scope types if scopeBucket == nil { return nil } + // compressionBucket won't have been created if previous writers never recorded info about compression, + // and we don't want to fail just because of that + compressionBucket := tx.Bucket(digestCompressorBucket) - res = bdc.appendReplacementCandidates(res, scopeBucket, primaryDigest) + res = bdc.appendReplacementCandidates(res, scopeBucket, compressionBucket, primaryDigest, requireCompressionInfo) if canSubstitute { if uncompressedDigestValue = bdc.uncompressedDigest(tx, primaryDigest); uncompressedDigestValue != "" { b := tx.Bucket(digestByUncompressedBucket) @@ -310,7 +361,7 @@ func (bdc *cache) CandidateLocations(transport types.ImageTransport, scope types return err } if d != primaryDigest && d != uncompressedDigestValue { - res = bdc.appendReplacementCandidates(res, scopeBucket, d) + res = bdc.appendReplacementCandidates(res, scopeBucket, compressionBucket, d, requireCompressionInfo) } return nil }); err != nil { @@ -319,14 +370,24 @@ func (bdc *cache) CandidateLocations(transport types.ImageTransport, scope types } } if uncompressedDigestValue != primaryDigest { - res = bdc.appendReplacementCandidates(res, scopeBucket, uncompressedDigestValue) + res = bdc.appendReplacementCandidates(res, scopeBucket, compressionBucket, uncompressedDigestValue, requireCompressionInfo) } } } return nil }); err != nil { // Including os.IsNotExist(err) - return []types.BICReplacementCandidate{} // FIXME? Log err (but throttle the log volume on repeated accesses)? + return []blobinfocache.BICReplacementCandidate2{} // FIXME? Log err (but throttle the log volume on repeated accesses)? } return prioritize.DestructivelyPrioritizeReplacementCandidates(res, primaryDigest, uncompressedDigestValue) } + +// CandidateLocations returns a prioritized, limited, number of blobs and their locations that could possibly be reused +// within the specified (transport scope) (if they still exist, which is not guaranteed). +// +// If !canSubstitute, the returned cadidates will match the submitted digest exactly; if canSubstitute, +// data from previous RecordDigestUncompressedPair calls is used to also look up variants of the blob which have the same +// uncompressed digest. 
+func (bdc *cache) CandidateLocations(transport types.ImageTransport, scope types.BICTransportScope, primaryDigest digest.Digest, canSubstitute bool) []types.BICReplacementCandidate { + return blobinfocache.CandidateLocationsFromV2(bdc.candidateLocations(transport, scope, primaryDigest, canSubstitute, false)) +} diff --git a/vendor/github.com/containers/image/v5/pkg/blobinfocache/internal/prioritize/prioritize.go b/vendor/github.com/containers/image/v5/pkg/blobinfocache/internal/prioritize/prioritize.go index 5deca4a82..6f5506d94 100644 --- a/vendor/github.com/containers/image/v5/pkg/blobinfocache/internal/prioritize/prioritize.go +++ b/vendor/github.com/containers/image/v5/pkg/blobinfocache/internal/prioritize/prioritize.go @@ -6,7 +6,7 @@ import ( "sort" "time" - "github.com/containers/image/v5/types" + "github.com/containers/image/v5/internal/blobinfocache" "github.com/opencontainers/go-digest" ) @@ -17,8 +17,8 @@ const replacementAttempts = 5 // CandidateWithTime is the input to types.BICReplacementCandidate prioritization. type CandidateWithTime struct { - Candidate types.BICReplacementCandidate // The replacement candidate - LastSeen time.Time // Time the candidate was last known to exist (either read or written) + Candidate blobinfocache.BICReplacementCandidate2 // The replacement candidate + LastSeen time.Time // Time the candidate was last known to exist (either read or written) } // candidateSortState is a local state implementing sort.Interface on candidates to prioritize, @@ -79,7 +79,7 @@ func (css *candidateSortState) Swap(i, j int) { // destructivelyPrioritizeReplacementCandidatesWithMax is destructivelyPrioritizeReplacementCandidates with a parameter for the // number of entries to limit, only to make testing simpler. -func destructivelyPrioritizeReplacementCandidatesWithMax(cs []CandidateWithTime, primaryDigest, uncompressedDigest digest.Digest, maxCandidates int) []types.BICReplacementCandidate { +func destructivelyPrioritizeReplacementCandidatesWithMax(cs []CandidateWithTime, primaryDigest, uncompressedDigest digest.Digest, maxCandidates int) []blobinfocache.BICReplacementCandidate2 { // We don't need to use sort.Stable() because nanosecond timestamps are (presumably?) unique, so no two elements should // compare equal. sort.Sort(&candidateSortState{ @@ -92,7 +92,7 @@ func destructivelyPrioritizeReplacementCandidatesWithMax(cs []CandidateWithTime, if resLength > maxCandidates { resLength = maxCandidates } - res := make([]types.BICReplacementCandidate, resLength) + res := make([]blobinfocache.BICReplacementCandidate2, resLength) for i := range res { res[i] = cs[i].Candidate } @@ -105,6 +105,6 @@ func destructivelyPrioritizeReplacementCandidatesWithMax(cs []CandidateWithTime, // // WARNING: The array of candidates is destructively modified. (The implementation of this function could of course // make a copy, but all CandidateLocations implementations build the slice of candidates only for the single purpose of calling this function anyway.) 
-func DestructivelyPrioritizeReplacementCandidates(cs []CandidateWithTime, primaryDigest, uncompressedDigest digest.Digest) []types.BICReplacementCandidate { +func DestructivelyPrioritizeReplacementCandidates(cs []CandidateWithTime, primaryDigest, uncompressedDigest digest.Digest) []blobinfocache.BICReplacementCandidate2 { return destructivelyPrioritizeReplacementCandidatesWithMax(cs, primaryDigest, uncompressedDigest, replacementAttempts) } diff --git a/vendor/github.com/containers/image/v5/pkg/blobinfocache/memory/memory.go b/vendor/github.com/containers/image/v5/pkg/blobinfocache/memory/memory.go index 8f28c6623..3d598057e 100644 --- a/vendor/github.com/containers/image/v5/pkg/blobinfocache/memory/memory.go +++ b/vendor/github.com/containers/image/v5/pkg/blobinfocache/memory/memory.go @@ -5,6 +5,7 @@ import ( "sync" "time" + "github.com/containers/image/v5/internal/blobinfocache" "github.com/containers/image/v5/pkg/blobinfocache/internal/prioritize" "github.com/containers/image/v5/types" digest "github.com/opencontainers/go-digest" @@ -25,6 +26,7 @@ type cache struct { uncompressedDigests map[digest.Digest]digest.Digest digestsByUncompressed map[digest.Digest]map[digest.Digest]struct{} // stores a set of digests for each uncompressed digest knownLocations map[locationKey]map[types.BICLocationReference]time.Time // stores last known existence time for each location reference + compressors map[digest.Digest]string // stores a compressor name, or blobinfocache.Unknown, for each digest } // New returns a BlobInfoCache implementation which is in-memory only. @@ -36,10 +38,15 @@ type cache struct { // Manual users of types.{ImageSource,ImageDestination} might also use // this instead of a persistent cache. func New() types.BlobInfoCache { + return new2() +} + +func new2() *cache { return &cache{ uncompressedDigests: map[digest.Digest]digest.Digest{}, digestsByUncompressed: map[digest.Digest]map[digest.Digest]struct{}{}, knownLocations: map[locationKey]map[types.BICLocationReference]time.Time{}, + compressors: map[digest.Digest]string{}, } } @@ -101,14 +108,34 @@ func (mem *cache) RecordKnownLocation(transport types.ImageTransport, scope type locationScope[location] = time.Now() // Possibly overwriting an older entry. } +// RecordDigestCompressorName records that the blob with the specified digest is either compressed with the specified +// algorithm, or uncompressed, or that we no longer know. +func (mem *cache) RecordDigestCompressorName(blobDigest digest.Digest, compressorName string) { + mem.mutex.Lock() + defer mem.mutex.Unlock() + if compressorName == blobinfocache.UnknownCompression { + delete(mem.compressors, blobDigest) + return + } + mem.compressors[blobDigest] = compressorName +} + // appendReplacementCandiates creates prioritize.CandidateWithTime values for (transport, scope, digest), and returns the result of appending them to candidates. 
-func (mem *cache) appendReplacementCandidates(candidates []prioritize.CandidateWithTime, transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest) []prioritize.CandidateWithTime { +func (mem *cache) appendReplacementCandidates(candidates []prioritize.CandidateWithTime, transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest, requireCompressionInfo bool) []prioritize.CandidateWithTime { locations := mem.knownLocations[locationKey{transport: transport.Name(), scope: scope, blobDigest: digest}] // nil if not present for l, t := range locations { + compressorName, compressorKnown := mem.compressors[digest] + if !compressorKnown { + if requireCompressionInfo { + continue + } + compressorName = blobinfocache.UnknownCompression + } candidates = append(candidates, prioritize.CandidateWithTime{ - Candidate: types.BICReplacementCandidate{ - Digest: digest, - Location: l, + Candidate: blobinfocache.BICReplacementCandidate2{ + Digest: digest, + CompressorName: compressorName, + Location: l, }, LastSeen: t, }) @@ -123,21 +150,35 @@ func (mem *cache) appendReplacementCandidates(candidates []prioritize.CandidateW // data from previous RecordDigestUncompressedPair calls is used to also look up variants of the blob which have the same // uncompressed digest. func (mem *cache) CandidateLocations(transport types.ImageTransport, scope types.BICTransportScope, primaryDigest digest.Digest, canSubstitute bool) []types.BICReplacementCandidate { + return blobinfocache.CandidateLocationsFromV2(mem.candidateLocations(transport, scope, primaryDigest, canSubstitute, false)) +} + +// CandidateLocations2 returns a prioritized, limited, number of blobs and their locations that could possibly be reused +// within the specified (transport scope) (if they still exist, which is not guaranteed). +// +// If !canSubstitute, the returned cadidates will match the submitted digest exactly; if canSubstitute, +// data from previous RecordDigestUncompressedPair calls is used to also look up variants of the blob which have the same +// uncompressed digest. 
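Taken together with the boltdb changes earlier, the memory cache supports the same round trip: compressor names are recorded per digest, surfaced through CandidateLocations2, and skipped there when unknown, while the legacy CandidateLocations keeps returning such entries without compression data. A module-internal sketch with a made-up digest:

package sketch

import (
	"github.com/containers/image/v5/internal/blobinfocache"
	"github.com/containers/image/v5/pkg/blobinfocache/memory"
	digest "github.com/opencontainers/go-digest"
)

// rememberCompressor records a compressor name for a (made-up) digest in the
// in-memory cache; candidates for this digest returned by CandidateLocations2
// will carry CompressorName "gzip" once locations are also recorded.
func rememberCompressor() blobinfocache.BlobInfoCache2 {
	cache := blobinfocache.FromBlobInfoCache(memory.New())
	layer := digest.Digest("sha256:1111111111111111111111111111111111111111111111111111111111111111") // made-up digest
	cache.RecordDigestCompressorName(layer, "gzip")
	return cache
}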
+func (mem *cache) CandidateLocations2(transport types.ImageTransport, scope types.BICTransportScope, primaryDigest digest.Digest, canSubstitute bool) []blobinfocache.BICReplacementCandidate2 { + return mem.candidateLocations(transport, scope, primaryDigest, canSubstitute, true) +} + +func (mem *cache) candidateLocations(transport types.ImageTransport, scope types.BICTransportScope, primaryDigest digest.Digest, canSubstitute, requireCompressionInfo bool) []blobinfocache.BICReplacementCandidate2 { mem.mutex.Lock() defer mem.mutex.Unlock() res := []prioritize.CandidateWithTime{} - res = mem.appendReplacementCandidates(res, transport, scope, primaryDigest) + res = mem.appendReplacementCandidates(res, transport, scope, primaryDigest, requireCompressionInfo) var uncompressedDigest digest.Digest // = "" if canSubstitute { if uncompressedDigest = mem.uncompressedDigestLocked(primaryDigest); uncompressedDigest != "" { otherDigests := mem.digestsByUncompressed[uncompressedDigest] // nil if not present in the map for d := range otherDigests { if d != primaryDigest && d != uncompressedDigest { - res = mem.appendReplacementCandidates(res, transport, scope, d) + res = mem.appendReplacementCandidates(res, transport, scope, d, requireCompressionInfo) } } if uncompressedDigest != primaryDigest { - res = mem.appendReplacementCandidates(res, transport, scope, uncompressedDigest) + res = mem.appendReplacementCandidates(res, transport, scope, uncompressedDigest, requireCompressionInfo) } } } diff --git a/vendor/github.com/containers/image/v5/pkg/blobinfocache/none/none.go b/vendor/github.com/containers/image/v5/pkg/blobinfocache/none/none.go index fa1879afd..2a54ff312 100644 --- a/vendor/github.com/containers/image/v5/pkg/blobinfocache/none/none.go +++ b/vendor/github.com/containers/image/v5/pkg/blobinfocache/none/none.go @@ -2,6 +2,7 @@ package none import ( + "github.com/containers/image/v5/internal/blobinfocache" "github.com/containers/image/v5/types" "github.com/opencontainers/go-digest" ) @@ -16,7 +17,7 @@ type noCache struct { // Manifest.Inspect, because configs only have one representation. // Any use of BlobInfoCache with blobs should usually use at least a // short-lived cache, ideally blobinfocache.DefaultCache. -var NoCache types.BlobInfoCache = noCache{} +var NoCache blobinfocache.BlobInfoCache2 = blobinfocache.FromBlobInfoCache(&noCache{}) // UncompressedDigest returns an uncompressed digest corresponding to anyDigest. // May return anyDigest if it is known to be uncompressed. diff --git a/vendor/github.com/containers/image/v5/pkg/docker/config/config.go b/vendor/github.com/containers/image/v5/pkg/docker/config/config.go index cf82ee861..983df41d8 100644 --- a/vendor/github.com/containers/image/v5/pkg/docker/config/config.go +++ b/vendor/github.com/containers/image/v5/pkg/docker/config/config.go @@ -86,7 +86,7 @@ func GetAllCredentials(sys *types.SystemContext) (map[string]types.DockerAuthCon // Note: we need to read the auth files in the inverse order to prevent // a priority inversion when writing to the map. authConfigs := make(map[string]types.DockerAuthConfig) - paths := getAuthFilePaths(sys) + paths := getAuthFilePaths(sys, homedir.Get()) for i := len(paths) - 1; i >= 0; i-- { path := paths[i] // readJSONFile returns an empty map in case the path doesn't exist. @@ -126,7 +126,9 @@ func GetAllCredentials(sys *types.SystemContext) (map[string]types.DockerAuthCon // getAuthFilePaths returns a slice of authPaths based on the system context // in the order they should be searched. 
Note that some paths may not exist. -func getAuthFilePaths(sys *types.SystemContext) []authPath { +// The homeDir parameter should always be homedir.Get(), and is only intended to be overridden +// by tests. +func getAuthFilePaths(sys *types.SystemContext, homeDir string) []authPath { paths := []authPath{} pathToAuth, lf, err := getPathToAuth(sys) if err == nil { @@ -139,7 +141,7 @@ func getAuthFilePaths(sys *types.SystemContext) []authPath { } xdgCfgHome := os.Getenv("XDG_CONFIG_HOME") if xdgCfgHome == "" { - xdgCfgHome = filepath.Join(homedir.Get(), ".config") + xdgCfgHome = filepath.Join(homeDir, ".config") } paths = append(paths, authPath{path: filepath.Join(xdgCfgHome, xdgConfigHomePath), legacyFormat: false}) if dockerConfig := os.Getenv("DOCKER_CONFIG"); dockerConfig != "" { @@ -148,11 +150,11 @@ func getAuthFilePaths(sys *types.SystemContext) []authPath { ) } else { paths = append(paths, - authPath{path: filepath.Join(homedir.Get(), dockerHomePath), legacyFormat: false}, + authPath{path: filepath.Join(homeDir, dockerHomePath), legacyFormat: false}, ) } paths = append(paths, - authPath{path: filepath.Join(homedir.Get(), dockerLegacyHomePath), legacyFormat: true}, + authPath{path: filepath.Join(homeDir, dockerLegacyHomePath), legacyFormat: true}, ) return paths } @@ -161,6 +163,12 @@ func getAuthFilePaths(sys *types.SystemContext) []authPath { // file or .docker/config.json, including support for OAuth2 and IdentityToken. // If an entry is not found, an empty struct is returned. func GetCredentials(sys *types.SystemContext, registry string) (types.DockerAuthConfig, error) { + return getCredentialsWithHomeDir(sys, registry, homedir.Get()) +} + +// getCredentialsWithHomeDir is an internal implementation detail of GetCredentials, +// it exists only to allow testing it with an artificial home directory. +func getCredentialsWithHomeDir(sys *types.SystemContext, registry, homeDir string) (types.DockerAuthConfig, error) { if sys != nil && sys.DockerAuthConfig != nil { logrus.Debug("Returning credentials from DockerAuthConfig") return *sys.DockerAuthConfig, nil @@ -177,7 +185,7 @@ func GetCredentials(sys *types.SystemContext, registry string) (types.DockerAuth } } - for _, path := range getAuthFilePaths(sys) { + for _, path := range getAuthFilePaths(sys, homeDir) { authConfig, err := findAuthentication(registry, path.path, path.legacyFormat) if err != nil { logrus.Debugf("Credentials not found") @@ -203,7 +211,13 @@ func GetCredentials(sys *types.SystemContext, registry string) (types.DockerAuth // GetCredentials API. The new API should be used and this API is kept to // maintain backward compatibility. func GetAuthentication(sys *types.SystemContext, registry string) (string, string, error) { - auth, err := GetCredentials(sys, registry) + return getAuthenticationWithHomeDir(sys, registry, homedir.Get()) +} + +// getAuthenticationWithHomeDir is an internal implementation detail of GetAuthentication, +// it exists only to allow testing it with an artificial home directory. 
+func getAuthenticationWithHomeDir(sys *types.SystemContext, registry, homeDir string) (string, string, error) { + auth, err := getCredentialsWithHomeDir(sys, registry, homeDir) if err != nil { return "", "", err } @@ -262,6 +276,12 @@ func RemoveAllAuthentication(sys *types.SystemContext) error { // getPathToAuth gets the path of the auth.json file used for reading and writing credentials // returns the path, and a bool specifies whether the file is in legacy format func getPathToAuth(sys *types.SystemContext) (string, bool, error) { + return getPathToAuthWithOS(sys, runtime.GOOS) +} + +// getPathToAuthWithOS is an internal implementation detail of getPathToAuth, +// it exists only to allow testing it with an artificial runtime.GOOS. +func getPathToAuthWithOS(sys *types.SystemContext, goOS string) (string, bool, error) { if sys != nil { if sys.AuthFilePath != "" { return sys.AuthFilePath, false, nil @@ -273,7 +293,7 @@ func getPathToAuth(sys *types.SystemContext) (string, bool, error) { return filepath.Join(sys.RootForImplicitAbsolutePaths, fmt.Sprintf(defaultPerUIDPathFormat, os.Getuid())), false, nil } } - if runtime.GOOS == "windows" || runtime.GOOS == "darwin" { + if goOS == "windows" || goOS == "darwin" { return filepath.Join(homedir.Get(), nonLinuxAuthFilePath), false, nil } diff --git a/vendor/github.com/containers/image/v5/pkg/shortnames/shortnames.go b/vendor/github.com/containers/image/v5/pkg/shortnames/shortnames.go index 198ac1cc6..f1e5c453e 100644 --- a/vendor/github.com/containers/image/v5/pkg/shortnames/shortnames.go +++ b/vendor/github.com/containers/image/v5/pkg/shortnames/shortnames.go @@ -149,9 +149,9 @@ const ( func (r *Resolved) Description() string { switch r.rationale { case rationaleAlias: - return fmt.Sprintf("Resolved short name %q to a recorded short-name alias (origin: %s)", r.userInput, r.originDescription) + return fmt.Sprintf("Resolved %q as an alias (%s)", r.userInput, r.originDescription) case rationaleUSR: - return fmt.Sprintf("Completed short name %q with unqualified-search registries (origin: %s)", r.userInput, r.originDescription) + return fmt.Sprintf("Resolving %q using unqualified-search registries (%s)", r.userInput, r.originDescription) case rationaleUserSelection, rationaleNone: fallthrough default: @@ -240,14 +240,14 @@ func Resolve(ctx *types.SystemContext, name string) (*Resolved, error) { // Create a copy of the system context to make it usable beyond this // function call. - var sys *types.SystemContext if ctx != nil { - sys = &(*ctx) + copy := *ctx + ctx = © } resolved.systemContext = ctx // Detect which mode we're running in. - mode, err := sysregistriesv2.GetShortNameMode(sys) + mode, err := sysregistriesv2.GetShortNameMode(ctx) if err != nil { return nil, err } @@ -276,7 +276,7 @@ func Resolve(ctx *types.SystemContext, name string) (*Resolved, error) { resolved.userInput = shortNameRepo // If there's already an alias, use it. - namedAlias, aliasOriginDescription, err := sysregistriesv2.ResolveShortNameAlias(sys, shortNameRepo.String()) + namedAlias, aliasOriginDescription, err := sysregistriesv2.ResolveShortNameAlias(ctx, shortNameRepo.String()) if err != nil { return nil, err } @@ -307,7 +307,7 @@ func Resolve(ctx *types.SystemContext, name string) (*Resolved, error) { resolved.rationale = rationaleUSR // Query the registry for unqualified-search registries. 
- unqualifiedSearchRegistries, usrConfig, err := sysregistriesv2.UnqualifiedSearchRegistriesWithOrigin(sys) + unqualifiedSearchRegistries, usrConfig, err := sysregistriesv2.UnqualifiedSearchRegistriesWithOrigin(ctx) if err != nil { return nil, err } diff --git a/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/system_registries_v2.go b/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/system_registries_v2.go index 89ad7c533..3312237ef 100644 --- a/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/system_registries_v2.go +++ b/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/system_registries_v2.go @@ -20,7 +20,7 @@ import ( // systemRegistriesConfPath is the path to the system-wide registry // configuration file and is used to add/subtract potential registries for // obtaining images. You can override this at build time with -// -ldflags '-X github.com/containers/image/sysregistries.systemRegistriesConfPath=$your_path' +// -ldflags '-X github.com/containers/image/v5/sysregistries.systemRegistriesConfPath=$your_path' var systemRegistriesConfPath = builtinRegistriesConfPath // builtinRegistriesConfPath is the path to the registry configuration file. @@ -30,7 +30,7 @@ const builtinRegistriesConfPath = "/etc/containers/registries.conf" // systemRegistriesConfDirPath is the path to the system-wide registry // configuration directory and is used to add/subtract potential registries for // obtaining images. You can override this at build time with -// -ldflags '-X github.com/containers/image/sysregistries.systemRegistriesConfDirecotyPath=$your_path' +// -ldflags '-X github.com/containers/image/v5/sysregistries.systemRegistriesConfDirecotyPath=$your_path' var systemRegistriesConfDirPath = builtinRegistriesConfDirPath // builtinRegistriesConfDirPath is the path to the registry configuration directory. @@ -405,9 +405,15 @@ type configWrapper struct { // newConfigWrapper returns a configWrapper for the specified SystemContext. func newConfigWrapper(ctx *types.SystemContext) configWrapper { + return newConfigWrapperWithHomeDir(ctx, homedir.Get()) +} + +// newConfigWrapperWithHomeDir is an internal implementation detail of newConfigWrapper, +// it exists only to allow testing it with an artificial home directory. +func newConfigWrapperWithHomeDir(ctx *types.SystemContext, homeDir string) configWrapper { var wrapper configWrapper - userRegistriesFilePath := filepath.Join(homedir.Get(), userRegistriesFile) - userRegistriesDirPath := filepath.Join(homedir.Get(), userRegistriesDir) + userRegistriesFilePath := filepath.Join(homeDir, userRegistriesFile) + userRegistriesDirPath := filepath.Join(homeDir, userRegistriesDir) // decide configPath using per-user path or system file if ctx != nil && ctx.SystemRegistriesConfPath != "" { diff --git a/vendor/github.com/containers/image/v5/signature/policy_config.go b/vendor/github.com/containers/image/v5/signature/policy_config.go index d8cc4a09b..82fbb68cb 100644 --- a/vendor/github.com/containers/image/v5/signature/policy_config.go +++ b/vendor/github.com/containers/image/v5/signature/policy_config.go @@ -30,7 +30,7 @@ import ( // systemDefaultPolicyPath is the policy path used for DefaultPolicy(). 
// You can override this at build time with -// -ldflags '-X github.com/containers/image/signature.systemDefaultPolicyPath=$your_path' +// -ldflags '-X github.com/containers/image/v5/signature.systemDefaultPolicyPath=$your_path' var systemDefaultPolicyPath = builtinDefaultPolicyPath // builtinDefaultPolicyPath is the policy path used for DefaultPolicy(). @@ -59,10 +59,16 @@ func DefaultPolicy(sys *types.SystemContext) (*Policy, error) { // defaultPolicyPath returns a path to the default policy of the system. func defaultPolicyPath(sys *types.SystemContext) string { + return defaultPolicyPathWithHomeDir(sys, homedir.Get()) +} + +// defaultPolicyPathWithHomeDir is an internal implementation detail of defaultPolicyPath, +// it exists only to allow testing it with an artificial home directory. +func defaultPolicyPathWithHomeDir(sys *types.SystemContext, homeDir string) string { if sys != nil && sys.SignaturePolicyPath != "" { return sys.SignaturePolicyPath } - userPolicyFilePath := filepath.Join(homedir.Get(), userPolicyFile) + userPolicyFilePath := filepath.Join(homeDir, userPolicyFile) if _, err := os.Stat(userPolicyFilePath); err == nil { return userPolicyFilePath } diff --git a/vendor/github.com/containers/image/v5/storage/storage_image.go b/vendor/github.com/containers/image/v5/storage/storage_image.go index d24f8bbee..924d684ae 100644 --- a/vendor/github.com/containers/image/v5/storage/storage_image.go +++ b/vendor/github.com/containers/image/v5/storage/storage_image.go @@ -463,7 +463,9 @@ func (s *storageImageDestination) PutBlob(ctx context.Context, stream io.Reader, // (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree). // info.Digest must not be empty. // If canSubstitute, TryReusingBlob can use an equivalent equivalent of the desired blob; in that case the returned info may not match the input. -// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size. +// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size, and may +// include CompressionOperation and CompressionAlgorithm fields to indicate that a change to the compression type should be +// reflected in the manifest that will be written. // If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure. // May use and/or update cache. 
func (s *storageImageDestination) TryReusingBlob(ctx context.Context, blobinfo types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) { diff --git a/vendor/github.com/containers/image/v5/tarball/doc.go b/vendor/github.com/containers/image/v5/tarball/doc.go index ead2d4263..e9d321b8f 100644 --- a/vendor/github.com/containers/image/v5/tarball/doc.go +++ b/vendor/github.com/containers/image/v5/tarball/doc.go @@ -5,11 +5,13 @@ // package main // // import ( -// "fmt" +// "context" // // cp "github.com/containers/image/v5/copy" +// "github.com/containers/image/v5/signature" // "github.com/containers/image/v5/tarball" // "github.com/containers/image/v5/transports/alltransports" +// "github.com/containers/image/v5/types" // imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" // ) // @@ -39,7 +41,18 @@ // if err != nil { // panic(err) // } -// err = cp.Image(nil, dest, src, nil) +// +// policy, err := signature.DefaultPolicy(nil) +// if err != nil { +// panic(err) +// } +// +// pc, err := signature.NewPolicyContext(policy) +// if err != nil { +// panic(err) +// } +// defer pc.Destroy() +// _, err = cp.Image(context.TODO(), pc, dest, src, nil) // if err != nil { // panic(err) // } diff --git a/vendor/github.com/containers/image/v5/types/types.go b/vendor/github.com/containers/image/v5/types/types.go index 3c5126b4e..8655ca443 100644 --- a/vendor/github.com/containers/image/v5/types/types.go +++ b/vendor/github.com/containers/image/v5/types/types.go @@ -126,14 +126,18 @@ type BlobInfo struct { Annotations map[string]string MediaType string // CompressionOperation is used in Image.UpdateLayerInfos to instruct - // whether the original layer should be preserved or (de)compressed. The - // field defaults to preserve the original layer. + // whether the original layer's "compressed or not" should be preserved, + // possibly while changing the compression algorithm from one to another, + // or if it should be compressed or decompressed. The field defaults to + // preserve the original layer's compressedness. // TODO: To remove together with CryptoOperation in re-design to remove // field out out of BlobInfo. CompressionOperation LayerCompression // CompressionAlgorithm is used in Image.UpdateLayerInfos to set the correct // MIME type for compressed layers (e.g., gzip or zstd). This field MUST be - // set when `CompressionOperation == Compress`. + // set when `CompressionOperation == Compress` and MAY be set when + // `CompressionOperation == PreserveOriginal` and the compression type is + // being changed for an already-compressed layer. CompressionAlgorithm *compression.Algorithm // CryptoOperation is used in Image.UpdateLayerInfos to instruct // whether the original layer was encrypted/decrypted @@ -194,6 +198,9 @@ type BICReplacementCandidate struct { // // None of the methods return an error indication: errors when neither reading from, nor writing to, the cache, should be fatal; // users of the cache should just fall back to copying the blobs the usual way. +// +// The BlobInfoCache interface is deprecated. Consumers of this library should use one of the implementations provided by +// subpackages of the library's "pkg/blobinfocache" package in preference to implementing the interface on their own. type BlobInfoCache interface { // UncompressedDigest returns an uncompressed digest corresponding to anyDigest. // May return anyDigest if it is known to be uncompressed. @@ -306,7 +313,9 @@ type ImageDestination interface { // (e.g. 
if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree). // info.Digest must not be empty. // If canSubstitute, TryReusingBlob can use an equivalent equivalent of the desired blob; in that case the returned info may not match the input. - // If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size. + // If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size, and may + // include CompressionOperation and CompressionAlgorithm fields to indicate that a change to the compression type should be + // reflected in the manifest that will be written. // If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure. // May use and/or update cache. TryReusingBlob(ctx context.Context, info BlobInfo, cache BlobInfoCache, canSubstitute bool) (bool, BlobInfo, error) @@ -397,6 +406,12 @@ type Image interface { // UpdatedImage returns a types.Image modified according to options. // Everything in options.InformationOnly should be provided, other fields should be set only if a modification is desired. // This does not change the state of the original Image object. + // The returned error will be a manifest.ManifestLayerCompressionIncompatibilityError if + // manifests of type options.ManifestMIMEType can not include layers that are compressed + // in accordance with the CompressionOperation and CompressionAlgorithm specified in one + // or more options.LayerInfos items, though retrying with a different + // options.ManifestMIMEType or with different CompressionOperation+CompressionAlgorithm + // values might succeed. UpdatedImage(ctx context.Context, options ManifestUpdateOptions) (Image, error) // SupportsEncryption returns an indicator that the image supports encryption // @@ -600,6 +615,8 @@ type SystemContext struct { DockerDisableV1Ping bool // If true, dockerImageDestination.SupportedManifestMIMETypes will omit the Schema1 media types from the supported list DockerDisableDestSchema1MIMETypes bool + // If true, the physical pull source of docker transport images logged as info level + DockerLogMirrorChoice bool // Directory to use for OSTree temporary files OSTreeTmpDirPath string diff --git a/vendor/github.com/containers/image/v5/version/version.go b/vendor/github.com/containers/image/v5/version/version.go index 48ecf938c..1fc775410 100644 --- a/vendor/github.com/containers/image/v5/version/version.go +++ b/vendor/github.com/containers/image/v5/version/version.go @@ -6,9 +6,9 @@ const ( // VersionMajor is for an API incompatible changes VersionMajor = 5 // VersionMinor is for functionality in a backwards-compatible manner - VersionMinor = 9 + VersionMinor = 10 // VersionPatch is for backwards-compatible bug fixes - VersionPatch = 0 + VersionPatch = 1 // VersionDev indicates development branch. Releases will be empty string. 
VersionDev = "" diff --git a/vendor/github.com/klauspost/compress/flate/gen_inflate.go b/vendor/github.com/klauspost/compress/flate/gen_inflate.go deleted file mode 100644 index 35fc072a3..000000000 --- a/vendor/github.com/klauspost/compress/flate/gen_inflate.go +++ /dev/null @@ -1,294 +0,0 @@ -// +build generate - -//go:generate go run $GOFILE && gofmt -w inflate_gen.go - -package main - -import ( - "os" - "strings" -) - -func main() { - f, err := os.Create("inflate_gen.go") - if err != nil { - panic(err) - } - defer f.Close() - types := []string{"*bytes.Buffer", "*bytes.Reader", "*bufio.Reader", "*strings.Reader"} - names := []string{"BytesBuffer", "BytesReader", "BufioReader", "StringsReader"} - imports := []string{"bytes", "bufio", "io", "strings", "math/bits"} - f.WriteString(`// Code generated by go generate gen_inflate.go. DO NOT EDIT. - -package flate - -import ( -`) - - for _, imp := range imports { - f.WriteString("\t\"" + imp + "\"\n") - } - f.WriteString(")\n\n") - - template := ` - -// Decode a single Huffman block from f. -// hl and hd are the Huffman states for the lit/length values -// and the distance values, respectively. If hd == nil, using the -// fixed distance encoding associated with fixed Huffman blocks. -func (f *decompressor) $FUNCNAME$() { - const ( - stateInit = iota // Zero value must be stateInit - stateDict - ) - fr := f.r.($TYPE$) - - switch f.stepState { - case stateInit: - goto readLiteral - case stateDict: - goto copyHistory - } - -readLiteral: - // Read literal and/or (length, distance) according to RFC section 3.2.3. - { - var v int - { - // Inlined v, err := f.huffSym(f.hl) - // Since a huffmanDecoder can be empty or be composed of a degenerate tree - // with single element, huffSym must error on these two edge cases. In both - // cases, the chunks slice will be 0 for the invalid sequence, leading it - // satisfy the n == 0 check below. - n := uint(f.hl.maxRead) - // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers, - // but is smart enough to keep local variables in registers, so use nb and b, - // inline call to moreBits and reassign b,nb back to f on return. 
- nb, b := f.nb, f.b - for { - for nb < n { - c, err := fr.ReadByte() - if err != nil { - f.b = b - f.nb = nb - f.err = noEOF(err) - return - } - f.roffset++ - b |= uint32(c) << (nb & regSizeMaskUint32) - nb += 8 - } - chunk := f.hl.chunks[b&(huffmanNumChunks-1)] - n = uint(chunk & huffmanCountMask) - if n > huffmanChunkBits { - chunk = f.hl.links[chunk>>huffmanValueShift][(b>>huffmanChunkBits)&f.hl.linkMask] - n = uint(chunk & huffmanCountMask) - } - if n <= nb { - if n == 0 { - f.b = b - f.nb = nb - if debugDecode { - fmt.Println("huffsym: n==0") - } - f.err = CorruptInputError(f.roffset) - return - } - f.b = b >> (n & regSizeMaskUint32) - f.nb = nb - n - v = int(chunk >> huffmanValueShift) - break - } - } - } - - var length int - switch { - case v < 256: - f.dict.writeByte(byte(v)) - if f.dict.availWrite() == 0 { - f.toRead = f.dict.readFlush() - f.step = (*decompressor).$FUNCNAME$ - f.stepState = stateInit - return - } - goto readLiteral - case v == 256: - f.finishBlock() - return - // otherwise, reference to older data - case v < 265: - length = v - (257 - 3) - case v < maxNumLit: - val := decCodeToLen[(v - 257)] - length = int(val.length) + 3 - n := uint(val.extra) - for f.nb < n { - c, err := fr.ReadByte() - if err != nil { - if debugDecode { - fmt.Println("morebits n>0:", err) - } - f.err = err - return - } - f.roffset++ - f.b |= uint32(c) << f.nb - f.nb += 8 - } - length += int(f.b & uint32(1<<(n®SizeMaskUint32)-1)) - f.b >>= n & regSizeMaskUint32 - f.nb -= n - default: - if debugDecode { - fmt.Println(v, ">= maxNumLit") - } - f.err = CorruptInputError(f.roffset) - return - } - - var dist uint32 - if f.hd == nil { - for f.nb < 5 { - c, err := fr.ReadByte() - if err != nil { - if debugDecode { - fmt.Println("morebits f.nb<5:", err) - } - f.err = err - return - } - f.roffset++ - f.b |= uint32(c) << f.nb - f.nb += 8 - } - dist = uint32(bits.Reverse8(uint8(f.b & 0x1F << 3))) - f.b >>= 5 - f.nb -= 5 - } else { - // Since a huffmanDecoder can be empty or be composed of a degenerate tree - // with single element, huffSym must error on these two edge cases. In both - // cases, the chunks slice will be 0 for the invalid sequence, leading it - // satisfy the n == 0 check below. - n := uint(f.hd.maxRead) - // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers, - // but is smart enough to keep local variables in registers, so use nb and b, - // inline call to moreBits and reassign b,nb back to f on return. - nb, b := f.nb, f.b - for { - for nb < n { - c, err := fr.ReadByte() - if err != nil { - f.b = b - f.nb = nb - f.err = noEOF(err) - return - } - f.roffset++ - b |= uint32(c) << (nb & regSizeMaskUint32) - nb += 8 - } - chunk := f.hd.chunks[b&(huffmanNumChunks-1)] - n = uint(chunk & huffmanCountMask) - if n > huffmanChunkBits { - chunk = f.hd.links[chunk>>huffmanValueShift][(b>>huffmanChunkBits)&f.hd.linkMask] - n = uint(chunk & huffmanCountMask) - } - if n <= nb { - if n == 0 { - f.b = b - f.nb = nb - if debugDecode { - fmt.Println("huffsym: n==0") - } - f.err = CorruptInputError(f.roffset) - return - } - f.b = b >> (n & regSizeMaskUint32) - f.nb = nb - n - dist = uint32(chunk >> huffmanValueShift) - break - } - } - } - - switch { - case dist < 4: - dist++ - case dist < maxNumDist: - nb := uint(dist-2) >> 1 - // have 1 bit in bottom of dist, need nb more. 
- extra := (dist & 1) << (nb & regSizeMaskUint32) - for f.nb < nb { - c, err := fr.ReadByte() - if err != nil { - if debugDecode { - fmt.Println("morebits f.nb<nb:", err) - } - f.err = err - return - } - f.roffset++ - f.b |= uint32(c) << f.nb - f.nb += 8 - } - extra |= f.b & uint32(1<<(nb®SizeMaskUint32)-1) - f.b >>= nb & regSizeMaskUint32 - f.nb -= nb - dist = 1<<((nb+1)®SizeMaskUint32) + 1 + extra - default: - if debugDecode { - fmt.Println("dist too big:", dist, maxNumDist) - } - f.err = CorruptInputError(f.roffset) - return - } - - // No check on length; encoding can be prescient. - if dist > uint32(f.dict.histSize()) { - if debugDecode { - fmt.Println("dist > f.dict.histSize():", dist, f.dict.histSize()) - } - f.err = CorruptInputError(f.roffset) - return - } - - f.copyLen, f.copyDist = length, int(dist) - goto copyHistory - } - -copyHistory: - // Perform a backwards copy according to RFC section 3.2.3. - { - cnt := f.dict.tryWriteCopy(f.copyDist, f.copyLen) - if cnt == 0 { - cnt = f.dict.writeCopy(f.copyDist, f.copyLen) - } - f.copyLen -= cnt - - if f.dict.availWrite() == 0 || f.copyLen > 0 { - f.toRead = f.dict.readFlush() - f.step = (*decompressor).$FUNCNAME$ // We need to continue this work - f.stepState = stateDict - return - } - goto readLiteral - } -} - -` - for i, t := range types { - s := strings.Replace(template, "$FUNCNAME$", "huffman"+names[i], -1) - s = strings.Replace(s, "$TYPE$", t, -1) - f.WriteString(s) - } - f.WriteString("func (f *decompressor) huffmanBlockDecoder() func() {\n") - f.WriteString("\tswitch f.r.(type) {\n") - for i, t := range types { - f.WriteString("\t\tcase " + t + ":\n") - f.WriteString("\t\t\treturn f.huffman" + names[i] + "\n") - } - f.WriteString("\t\tdefault:\n") - f.WriteString("\t\t\treturn f.huffmanBlockGeneric") - f.WriteString("\t}\n}\n") -} diff --git a/vendor/github.com/klauspost/compress/huff0/README.md b/vendor/github.com/klauspost/compress/huff0/README.md index e12da4db2..8b6e5c663 100644 --- a/vendor/github.com/klauspost/compress/huff0/README.md +++ b/vendor/github.com/klauspost/compress/huff0/README.md @@ -14,7 +14,9 @@ but it can be used as a secondary step to compressors (like Snappy) that does no ## News
- * Mar 2018: First implementation released. Consider this beta software for now.
+This is used as part of the [zstandard](https://github.com/klauspost/compress/tree/master/zstd#zstd) compression and decompression package.
+
+This ensures that most functionality is well tested.
# Usage
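
> Editorial aside, not part of this patch: the README hunk above notes that huff0 is exercised primarily through the zstd package. The sketch below (payload and variable names are invented for illustration) shows a typical round trip through `github.com/klauspost/compress/zstd`; handing `Decoder.Reset` a small `*bytes.Buffer` is the case that the `decoder.go` change further below generalizes to any reader exposing `Bytes()`/`Len()` via the new `byter` interface, keeping the synchronous decode path for inputs under 1MB.

```go
package main

import (
	"bytes"
	"fmt"
	"io"

	"github.com/klauspost/compress/zstd"
)

func main() {
	payload := []byte("hello, zstd (and huff0 underneath)") // illustrative data

	// One-shot compression; a nil writer is acceptable when only EncodeAll is used.
	enc, err := zstd.NewWriter(nil)
	if err != nil {
		panic(err)
	}
	compressed := enc.EncodeAll(payload, nil)
	enc.Close()

	// Reusable decoder; Reset onto a small bytes.Buffer, which Reset
	// recognizes and decodes synchronously rather than in background goroutines.
	dec, err := zstd.NewReader(nil)
	if err != nil {
		panic(err)
	}
	defer dec.Close()
	if err := dec.Reset(bytes.NewBuffer(compressed)); err != nil {
		panic(err)
	}

	var out bytes.Buffer
	if _, err := io.Copy(&out, dec); err != nil {
		panic(err)
	}
	fmt.Println(out.String())
}
```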
diff --git a/vendor/github.com/klauspost/compress/zstd/decoder.go b/vendor/github.com/klauspost/compress/zstd/decoder.go index 62fd37324..1d41c25d2 100644 --- a/vendor/github.com/klauspost/compress/zstd/decoder.go +++ b/vendor/github.com/klauspost/compress/zstd/decoder.go @@ -5,7 +5,6 @@ package zstd import ( - "bytes" "errors" "io" "sync" @@ -179,11 +178,13 @@ func (d *Decoder) Reset(r io.Reader) error { } // If bytes buffer and < 1MB, do sync decoding anyway. - if bb, ok := r.(*bytes.Buffer); ok && bb.Len() < 1<<20 { + if bb, ok := r.(byter); ok && bb.Len() < 1<<20 { + var bb2 byter + bb2 = bb if debug { println("*bytes.Buffer detected, doing sync decode, len:", bb.Len()) } - b := bb.Bytes() + b := bb2.Bytes() var dst []byte if cap(d.current.b) > 0 { dst = d.current.b diff --git a/vendor/github.com/klauspost/compress/zstd/zstd.go b/vendor/github.com/klauspost/compress/zstd/zstd.go index 0c761dd62..9056beef2 100644 --- a/vendor/github.com/klauspost/compress/zstd/zstd.go +++ b/vendor/github.com/klauspost/compress/zstd/zstd.go @@ -4,6 +4,7 @@ package zstd import ( + "bytes" "errors" "log" "math" @@ -146,3 +147,10 @@ func load64(b []byte, i int) uint64 { return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 } + +type byter interface { + Bytes() []byte + Len() int +} + +var _ byter = &bytes.Buffer{} diff --git a/vendor/github.com/onsi/ginkgo/.travis.yml b/vendor/github.com/onsi/ginkgo/.travis.yml index 079af2431..8b2883f97 100644 --- a/vendor/github.com/onsi/ginkgo/.travis.yml +++ b/vendor/github.com/onsi/ginkgo/.travis.yml @@ -1,7 +1,7 @@ language: go go: - - 1.13.x - 1.14.x + - 1.15.x - tip cache: @@ -16,10 +16,9 @@ install: - GO111MODULE="off" go get golang.org/x/tools/cmd/cover - GO111MODULE="off" go get github.com/onsi/gomega - GO111MODULE="off" go install github.com/onsi/ginkgo/ginkgo - - export PATH=$PATH:$HOME/gopath/bin + - export PATH=$GOPATH/bin:$PATH script: - - GO111MODULE="on" go mod tidy - - diff -u <(echo -n) <(git diff go.mod) - - diff -u <(echo -n) <(git diff go.sum) - - $HOME/gopath/bin/ginkgo -r --randomizeAllSpecs --randomizeSuites --race --trace && go vet + - GO111MODULE="on" go mod tidy && git diff --exit-code go.mod go.sum + - go vet + - ginkgo -r --randomizeAllSpecs --randomizeSuites --race --trace diff --git a/vendor/github.com/onsi/ginkgo/CHANGELOG.md b/vendor/github.com/onsi/ginkgo/CHANGELOG.md index a733f95fc..bf51fe9cd 100644 --- a/vendor/github.com/onsi/ginkgo/CHANGELOG.md +++ b/vendor/github.com/onsi/ginkgo/CHANGELOG.md @@ -1,3 +1,14 @@ +## 1.15.0 + +### Features +- Adds 'outline' command to print the outline of specs/containers in a file (#754) [071c369] [6803cc3] [935b538] [06744e8] [0c40583] +- Add support for using template to generate tests (#752) [efb9e69] +- Add a Chinese Doc #755 (#756) [5207632] +- cli: allow multiple -focus and -skip flags (#736) [9a782fb] + +### Fixes +- Add _internal to filename of tests created with internal flag (#751) [43c12da] + ## 1.14.2 ### Fixes diff --git a/vendor/github.com/onsi/ginkgo/README.md b/vendor/github.com/onsi/ginkgo/README.md index 475e04994..64e85eee0 100644 --- a/vendor/github.com/onsi/ginkgo/README.md +++ b/vendor/github.com/onsi/ginkgo/README.md @@ -2,7 +2,7 @@ [![Build Status](https://travis-ci.org/onsi/ginkgo.svg?branch=master)](https://travis-ci.org/onsi/ginkgo) -Jump to the [docs](https://onsi.github.io/ginkgo/) to learn more. To start rolling your Ginkgo tests *now* [keep reading](#set-me-up)! 
+Jump to the [docs](https://onsi.github.io/ginkgo/) | [中文文档](https://ke-chain.github.io/ginkgodoc) to learn more. To start rolling your Ginkgo tests *now* [keep reading](#set-me-up)! If you have a question, comment, bug report, feature request, etc. please open a GitHub issue, or visit the [Ginkgo Slack channel](https://app.slack.com/client/T029RQSE6/CQQ50BBNW). diff --git a/vendor/github.com/onsi/ginkgo/config/config.go b/vendor/github.com/onsi/ginkgo/config/config.go index 3220c095c..8c177811e 100644 --- a/vendor/github.com/onsi/ginkgo/config/config.go +++ b/vendor/github.com/onsi/ginkgo/config/config.go @@ -20,14 +20,14 @@ import ( "fmt" ) -const VERSION = "1.14.2" +const VERSION = "1.15.0" type GinkgoConfigType struct { RandomSeed int64 RandomizeAllSpecs bool RegexScansFilePath bool - FocusString string - SkipString string + FocusStrings []string + SkipStrings []string SkipMeasurements bool FailOnPending bool FailFast bool @@ -65,6 +65,11 @@ func processPrefix(prefix string) string { return prefix } +type flagFunc func(string) + +func (f flagFunc) String() string { return "" } +func (f flagFunc) Set(s string) error { f(s); return nil } + func Flags(flagSet *flag.FlagSet, prefix string, includeParallelFlags bool) { prefix = processPrefix(prefix) flagSet.Int64Var(&(GinkgoConfig.RandomSeed), prefix+"seed", time.Now().Unix(), "The seed used to randomize the spec suite.") @@ -75,8 +80,8 @@ func Flags(flagSet *flag.FlagSet, prefix string, includeParallelFlags bool) { flagSet.BoolVar(&(GinkgoConfig.DryRun), prefix+"dryRun", false, "If set, ginkgo will walk the test hierarchy without actually running anything. Best paired with -v.") - flagSet.StringVar(&(GinkgoConfig.FocusString), prefix+"focus", "", "If set, ginkgo will only run specs that match this regular expression.") - flagSet.StringVar(&(GinkgoConfig.SkipString), prefix+"skip", "", "If set, ginkgo will only run specs that do not match this regular expression.") + flagSet.Var(flagFunc(flagFocus), prefix+"focus", "If set, ginkgo will only run specs that match this regular expression. Can be specified multiple times, values are ORed.") + flagSet.Var(flagFunc(flagSkip), prefix+"skip", "If set, ginkgo will only run specs that do not match this regular expression. Can be specified multiple times, values are ORed.") flagSet.BoolVar(&(GinkgoConfig.RegexScansFilePath), prefix+"regexScansFilePath", false, "If set, ginkgo regex matching also will look at the file path (code location).") @@ -133,12 +138,12 @@ func BuildFlagArgs(prefix string, ginkgo GinkgoConfigType, reporter DefaultRepor result = append(result, fmt.Sprintf("--%sdryRun", prefix)) } - if ginkgo.FocusString != "" { - result = append(result, fmt.Sprintf("--%sfocus=%s", prefix, ginkgo.FocusString)) + for _, s := range ginkgo.FocusStrings { + result = append(result, fmt.Sprintf("--%sfocus=%s", prefix, s)) } - if ginkgo.SkipString != "" { - result = append(result, fmt.Sprintf("--%sskip=%s", prefix, ginkgo.SkipString)) + for _, s := range ginkgo.SkipStrings { + result = append(result, fmt.Sprintf("--%sskip=%s", prefix, s)) } if ginkgo.FlakeAttempts > 1 { @@ -211,3 +216,13 @@ func BuildFlagArgs(prefix string, ginkgo GinkgoConfigType, reporter DefaultRepor return result } + +// flagFocus implements the -focus flag. +func flagFocus(arg string) { + GinkgoConfig.FocusStrings = append(GinkgoConfig.FocusStrings, arg) +} + +// flagSkip implements the -skip flag. 
+func flagSkip(arg string) { + GinkgoConfig.SkipStrings = append(GinkgoConfig.SkipStrings, arg) +} diff --git a/vendor/github.com/onsi/ginkgo/ginkgo/generate_command.go b/vendor/github.com/onsi/ginkgo/ginkgo/generate_command.go index 99557048a..288df7797 100644 --- a/vendor/github.com/onsi/ginkgo/ginkgo/generate_command.go +++ b/vendor/github.com/onsi/ginkgo/ginkgo/generate_command.go @@ -4,6 +4,7 @@ import ( "bytes" "flag" "fmt" + "io/ioutil" "os" "path/filepath" "strconv" @@ -12,11 +13,15 @@ import ( ) func BuildGenerateCommand() *Command { - var agouti, noDot, internal bool + var ( + agouti, noDot, internal bool + customTestFile string + ) flagSet := flag.NewFlagSet("generate", flag.ExitOnError) flagSet.BoolVar(&agouti, "agouti", false, "If set, generate will generate a test file for writing Agouti tests") flagSet.BoolVar(&noDot, "nodot", false, "If set, generate will generate a test file that does not . import ginkgo and gomega") flagSet.BoolVar(&internal, "internal", false, "If set, generate will generate a test file that uses the regular package name") + flagSet.StringVar(&customTestFile, "template", "", "If specified, generate will use the contents of the file passed as the test file template") return &Command{ Name: "generate", @@ -28,7 +33,7 @@ func BuildGenerateCommand() *Command { "Accepts the following flags:", }, Command: func(args []string, additionalArgs []string) { - generateSpec(args, agouti, noDot, internal) + generateSpec(args, agouti, noDot, internal, customTestFile) }, } } @@ -81,9 +86,9 @@ type specData struct { ImportPackage bool } -func generateSpec(args []string, agouti, noDot, internal bool) { +func generateSpec(args []string, agouti, noDot, internal bool, customTestFile string) { if len(args) == 0 { - err := generateSpecForSubject("", agouti, noDot, internal) + err := generateSpecForSubject("", agouti, noDot, internal, customTestFile) if err != nil { fmt.Println(err.Error()) fmt.Println("") @@ -95,7 +100,7 @@ func generateSpec(args []string, agouti, noDot, internal bool) { var failed bool for _, arg := range args { - err := generateSpecForSubject(arg, agouti, noDot, internal) + err := generateSpecForSubject(arg, agouti, noDot, internal, customTestFile) if err != nil { failed = true fmt.Println(err.Error()) @@ -107,13 +112,17 @@ func generateSpec(args []string, agouti, noDot, internal bool) { } } -func generateSpecForSubject(subject string, agouti, noDot, internal bool) error { +func generateSpecForSubject(subject string, agouti, noDot, internal bool, customTestFile string) error { packageName, specFilePrefix, formattedName := getPackageAndFormattedName() if subject != "" { specFilePrefix = formatSubject(subject) formattedName = prettifyPackageName(specFilePrefix) } + if internal { + specFilePrefix = specFilePrefix + "_internal" + } + data := specData{ Package: determinePackageName(packageName, internal), Subject: formattedName, @@ -136,7 +145,13 @@ func generateSpecForSubject(subject string, agouti, noDot, internal bool) error defer f.Close() var templateText string - if agouti { + if customTestFile != "" { + tpl, err := ioutil.ReadFile(customTestFile) + if err != nil { + panic(err.Error()) + } + templateText = string(tpl) + } else if agouti { templateText = agoutiSpecText } else { templateText = specText diff --git a/vendor/github.com/onsi/ginkgo/ginkgo/main.go b/vendor/github.com/onsi/ginkgo/ginkgo/main.go index f60c48a72..ac725bf40 100644 --- a/vendor/github.com/onsi/ginkgo/ginkgo/main.go +++ b/vendor/github.com/onsi/ginkgo/ginkgo/main.go @@ -111,6 +111,11 @@ 
will output an executable file named `package.test`. This can be run directly o ginkgo <path-to-package.test> + +To print an outline of Ginkgo specs and containers in a file: + + gingko outline <filename> + To print out Ginkgo's version: ginkgo version @@ -172,6 +177,7 @@ func init() { Commands = append(Commands, BuildUnfocusCommand()) Commands = append(Commands, BuildVersionCommand()) Commands = append(Commands, BuildHelpCommand()) + Commands = append(Commands, BuildOutlineCommand()) } func main() { diff --git a/vendor/github.com/onsi/ginkgo/ginkgo/outline/ginkgo.go b/vendor/github.com/onsi/ginkgo/ginkgo/outline/ginkgo.go new file mode 100644 index 000000000..ce6b7fcd7 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/ginkgo/outline/ginkgo.go @@ -0,0 +1,243 @@ +package outline + +import ( + "go/ast" + "go/token" + "strconv" +) + +const ( + // undefinedTextAlt is used if the spec/container text cannot be derived + undefinedTextAlt = "undefined" +) + +// ginkgoMetadata holds useful bits of information for every entry in the outline +type ginkgoMetadata struct { + // Name is the spec or container function name, e.g. `Describe` or `It` + Name string `json:"name"` + + // Text is the `text` argument passed to specs, and some containers + Text string `json:"text"` + + // Start is the position of first character of the spec or container block + Start int `json:"start"` + + // End is the position of first character immediately after the spec or container block + End int `json:"end"` + + Spec bool `json:"spec"` + Focused bool `json:"focused"` + Pending bool `json:"pending"` +} + +// ginkgoNode is used to construct the outline as a tree +type ginkgoNode struct { + ginkgoMetadata + Nodes []*ginkgoNode `json:"nodes"` +} + +type walkFunc func(n *ginkgoNode) + +func (n *ginkgoNode) PreOrder(f walkFunc) { + f(n) + for _, m := range n.Nodes { + m.PreOrder(f) + } +} + +func (n *ginkgoNode) PostOrder(f walkFunc) { + for _, m := range n.Nodes { + m.PostOrder(f) + } + f(n) +} + +func (n *ginkgoNode) Walk(pre, post walkFunc) { + pre(n) + for _, m := range n.Nodes { + m.Walk(pre, post) + } + post(n) +} + +// PropagateInheritedProperties propagates the Pending and Focused properties +// through the subtree rooted at n. +func (n *ginkgoNode) PropagateInheritedProperties() { + n.PreOrder(func(thisNode *ginkgoNode) { + for _, descendantNode := range thisNode.Nodes { + if thisNode.Pending { + descendantNode.Pending = true + descendantNode.Focused = false + } + if thisNode.Focused && !descendantNode.Pending { + descendantNode.Focused = true + } + } + }) +} + +// BackpropagateUnfocus propagates the Focused property through the subtree +// rooted at n. It applies the rule described in the Ginkgo docs: +// > Nested programmatically focused specs follow a simple rule: if a +// > leaf-node is marked focused, any of its ancestor nodes that are marked +// > focus will be unfocused. 
+func (n *ginkgoNode) BackpropagateUnfocus() { + focusedSpecInSubtreeStack := []bool{} + n.PostOrder(func(thisNode *ginkgoNode) { + if thisNode.Spec { + focusedSpecInSubtreeStack = append(focusedSpecInSubtreeStack, thisNode.Focused) + return + } + focusedSpecInSubtree := false + for range thisNode.Nodes { + focusedSpecInSubtree = focusedSpecInSubtree || focusedSpecInSubtreeStack[len(focusedSpecInSubtreeStack)-1] + focusedSpecInSubtreeStack = focusedSpecInSubtreeStack[0 : len(focusedSpecInSubtreeStack)-1] + } + focusedSpecInSubtreeStack = append(focusedSpecInSubtreeStack, focusedSpecInSubtree) + if focusedSpecInSubtree { + thisNode.Focused = false + } + }) + +} + +func packageAndIdentNamesFromCallExpr(ce *ast.CallExpr) (string, string, bool) { + switch ex := ce.Fun.(type) { + case *ast.Ident: + return "", ex.Name, true + case *ast.SelectorExpr: + pkgID, ok := ex.X.(*ast.Ident) + if !ok { + return "", "", false + } + // A package identifier is top-level, so Obj must be nil + if pkgID.Obj != nil { + return "", "", false + } + if ex.Sel == nil { + return "", "", false + } + return pkgID.Name, ex.Sel.Name, true + default: + return "", "", false + } +} + +// absoluteOffsetsForNode derives the absolute character offsets of the node start and +// end positions. +func absoluteOffsetsForNode(fset *token.FileSet, n ast.Node) (start, end int) { + return fset.PositionFor(n.Pos(), false).Offset, fset.PositionFor(n.End(), false).Offset +} + +// ginkgoNodeFromCallExpr derives an outline entry from a go AST subtree +// corresponding to a Ginkgo container or spec. +func ginkgoNodeFromCallExpr(fset *token.FileSet, ce *ast.CallExpr, ginkgoPackageName, tablePackageName *string) (*ginkgoNode, bool) { + packageName, identName, ok := packageAndIdentNamesFromCallExpr(ce) + if !ok { + return nil, false + } + + n := ginkgoNode{} + n.Name = identName + n.Start, n.End = absoluteOffsetsForNode(fset, ce) + n.Nodes = make([]*ginkgoNode, 0) + switch identName { + case "It", "Measure", "Specify": + n.Spec = true + n.Text = textOrAltFromCallExpr(ce, undefinedTextAlt) + return &n, ginkgoPackageName != nil && *ginkgoPackageName == packageName + case "Entry": + n.Spec = true + n.Text = textOrAltFromCallExpr(ce, undefinedTextAlt) + return &n, tablePackageName != nil && *tablePackageName == packageName + case "FIt", "FMeasure", "FSpecify": + n.Spec = true + n.Focused = true + n.Text = textOrAltFromCallExpr(ce, undefinedTextAlt) + return &n, ginkgoPackageName != nil && *ginkgoPackageName == packageName + case "FEntry": + n.Spec = true + n.Focused = true + n.Text = textOrAltFromCallExpr(ce, undefinedTextAlt) + return &n, tablePackageName != nil && *tablePackageName == packageName + case "PIt", "PMeasure", "PSpecify", "XIt", "XMeasure", "XSpecify": + n.Spec = true + n.Pending = true + n.Text = textOrAltFromCallExpr(ce, undefinedTextAlt) + return &n, ginkgoPackageName != nil && *ginkgoPackageName == packageName + case "PEntry", "XEntry": + n.Spec = true + n.Pending = true + n.Text = textOrAltFromCallExpr(ce, undefinedTextAlt) + return &n, tablePackageName != nil && *tablePackageName == packageName + case "Context", "Describe", "When": + n.Text = textOrAltFromCallExpr(ce, undefinedTextAlt) + return &n, ginkgoPackageName != nil && *ginkgoPackageName == packageName + case "DescribeTable": + n.Text = textOrAltFromCallExpr(ce, undefinedTextAlt) + return &n, tablePackageName != nil && *tablePackageName == packageName + case "FContext", "FDescribe", "FWhen": + n.Focused = true + n.Text = textOrAltFromCallExpr(ce, undefinedTextAlt) + return 
&n, ginkgoPackageName != nil && *ginkgoPackageName == packageName + case "FDescribeTable": + n.Focused = true + n.Text = textOrAltFromCallExpr(ce, undefinedTextAlt) + return &n, tablePackageName != nil && *tablePackageName == packageName + case "PContext", "PDescribe", "PWhen", "XContext", "XDescribe", "XWhen": + n.Pending = true + n.Text = textOrAltFromCallExpr(ce, undefinedTextAlt) + return &n, ginkgoPackageName != nil && *ginkgoPackageName == packageName + case "PDescribeTable", "XDescribeTable": + n.Pending = true + n.Text = textOrAltFromCallExpr(ce, undefinedTextAlt) + return &n, tablePackageName != nil && *tablePackageName == packageName + case "By": + n.Text = textOrAltFromCallExpr(ce, undefinedTextAlt) + return &n, ginkgoPackageName != nil && *ginkgoPackageName == packageName + case "AfterEach", "BeforeEach": + return &n, ginkgoPackageName != nil && *ginkgoPackageName == packageName + case "JustAfterEach", "JustBeforeEach": + return &n, ginkgoPackageName != nil && *ginkgoPackageName == packageName + case "AfterSuite", "BeforeSuite": + return &n, ginkgoPackageName != nil && *ginkgoPackageName == packageName + case "SynchronizedAfterSuite", "SynchronizedBeforeSuite": + return &n, ginkgoPackageName != nil && *ginkgoPackageName == packageName + default: + return nil, false + } +} + +// textOrAltFromCallExpr tries to derive the "text" of a Ginkgo spec or +// container. If it cannot derive it, it returns the alt text. +func textOrAltFromCallExpr(ce *ast.CallExpr, alt string) string { + text, defined := textFromCallExpr(ce) + if !defined { + return alt + } + return text +} + +// textFromCallExpr tries to derive the "text" of a Ginkgo spec or container. If +// it cannot derive it, it returns false. +func textFromCallExpr(ce *ast.CallExpr) (string, bool) { + if len(ce.Args) < 1 { + return "", false + } + text, ok := ce.Args[0].(*ast.BasicLit) + if !ok { + return "", false + } + switch text.Kind { + case token.CHAR, token.STRING: + // For token.CHAR and token.STRING, Value is quoted + unquoted, err := strconv.Unquote(text.Value) + if err != nil { + // If unquoting fails, just use the raw Value + return text.Value, true + } + return unquoted, true + default: + return text.Value, true + } +} diff --git a/vendor/github.com/onsi/ginkgo/ginkgo/outline/import.go b/vendor/github.com/onsi/ginkgo/ginkgo/outline/import.go new file mode 100644 index 000000000..4328ab391 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/ginkgo/outline/import.go @@ -0,0 +1,65 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Most of the required functions were available in the +// "golang.org/x/tools/go/ast/astutil" package, but not exported. +// They were copied from https://github.com/golang/tools/blob/2b0845dc783e36ae26d683f4915a5840ef01ab0f/go/ast/astutil/imports.go + +package outline + +import ( + "go/ast" + "strconv" + "strings" +) + +// packageNameForImport returns the package name for the package. If the package +// is not imported, it returns nil. "Package name" refers to `pkgname` in the +// call expression `pkgname.ExportedIdentifier`. Examples: +// (import path not found) -> nil +// "import example.com/pkg/foo" -> "foo" +// "import fooalias example.com/pkg/foo" -> "fooalias" +// "import . 
example.com/pkg/foo" -> "" +func packageNameForImport(f *ast.File, path string) *string { + spec := importSpec(f, path) + if spec == nil { + return nil + } + name := spec.Name.String() + if name == "<nil>" { + // If the package name is not explicitly specified, + // make an educated guess. This is not guaranteed to be correct. + lastSlash := strings.LastIndex(path, "/") + if lastSlash == -1 { + name = path + } else { + name = path[lastSlash+1:] + } + } + if name == "." { + name = "" + } + return &name +} + +// importSpec returns the import spec if f imports path, +// or nil otherwise. +func importSpec(f *ast.File, path string) *ast.ImportSpec { + for _, s := range f.Imports { + if importPath(s) == path { + return s + } + } + return nil +} + +// importPath returns the unquoted import path of s, +// or "" if the path is not properly quoted. +func importPath(s *ast.ImportSpec) string { + t, err := strconv.Unquote(s.Path.Value) + if err != nil { + return "" + } + return t +} diff --git a/vendor/github.com/onsi/ginkgo/ginkgo/outline/outline.go b/vendor/github.com/onsi/ginkgo/ginkgo/outline/outline.go new file mode 100644 index 000000000..242e6a109 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/ginkgo/outline/outline.go @@ -0,0 +1,107 @@ +package outline + +import ( + "encoding/json" + "fmt" + "go/ast" + "go/token" + "strings" + + "golang.org/x/tools/go/ast/inspector" +) + +const ( + // ginkgoImportPath is the well-known ginkgo import path + ginkgoImportPath = "github.com/onsi/ginkgo" + + // tableImportPath is the well-known table extension import path + tableImportPath = "github.com/onsi/ginkgo/extensions/table" +) + +// FromASTFile returns an outline for a Ginkgo test source file +func FromASTFile(fset *token.FileSet, src *ast.File) (*outline, error) { + ginkgoPackageName := packageNameForImport(src, ginkgoImportPath) + tablePackageName := packageNameForImport(src, tableImportPath) + if ginkgoPackageName == nil && tablePackageName == nil { + return nil, fmt.Errorf("file does not import %q or %q", ginkgoImportPath, tableImportPath) + } + + root := ginkgoNode{} + stack := []*ginkgoNode{&root} + ispr := inspector.New([]*ast.File{src}) + ispr.Nodes([]ast.Node{(*ast.CallExpr)(nil)}, func(node ast.Node, push bool) bool { + if push { + // Pre-order traversal + ce, ok := node.(*ast.CallExpr) + if !ok { + // Because `Nodes` calls this function only when the node is an + // ast.CallExpr, this should never happen + panic(fmt.Errorf("node starting at %d, ending at %d is not an *ast.CallExpr", node.Pos(), node.End())) + } + gn, ok := ginkgoNodeFromCallExpr(fset, ce, ginkgoPackageName, tablePackageName) + if !ok { + // Node is not a Ginkgo spec or container, continue + return true + } + parent := stack[len(stack)-1] + parent.Nodes = append(parent.Nodes, gn) + stack = append(stack, gn) + return true + } + // Post-order traversal + start, end := absoluteOffsetsForNode(fset, node) + lastVisitedGinkgoNode := stack[len(stack)-1] + if start != lastVisitedGinkgoNode.Start || end != lastVisitedGinkgoNode.End { + // Node is not a Ginkgo spec or container, so it was not pushed onto the stack, continue + return true + } + stack = stack[0 : len(stack)-1] + return true + }) + if len(root.Nodes) == 0 { + return &outline{[]*ginkgoNode{}}, nil + } + + // Derive the final focused property for all nodes. This must be done + // _before_ propagating the inherited focused property. + root.BackpropagateUnfocus() + // Now, propagate inherited properties, including focused and pending. 
+ root.PropagateInheritedProperties() + + return &outline{root.Nodes}, nil +} + +type outline struct { + Nodes []*ginkgoNode `json:"nodes"` +} + +func (o *outline) MarshalJSON() ([]byte, error) { + return json.Marshal(o.Nodes) +} + +// String returns a CSV-formatted outline. Spec or container are output in +// depth-first order. +func (o *outline) String() string { + return o.StringIndent(0) +} + +// StringIndent returns a CSV-formated outline, but every line is indented by +// one 'width' of spaces for every level of nesting. +func (o *outline) StringIndent(width int) string { + var b strings.Builder + b.WriteString("Name,Text,Start,End,Spec,Focused,Pending\n") + + currentIndent := 0 + pre := func(n *ginkgoNode) { + b.WriteString(fmt.Sprintf("%*s", currentIndent, "")) + b.WriteString(fmt.Sprintf("%s,%s,%d,%d,%t,%t,%t\n", n.Name, n.Text, n.Start, n.End, n.Spec, n.Focused, n.Pending)) + currentIndent += width + } + post := func(n *ginkgoNode) { + currentIndent -= width + } + for _, n := range o.Nodes { + n.Walk(pre, post) + } + return b.String() +} diff --git a/vendor/github.com/onsi/ginkgo/ginkgo/outline_command.go b/vendor/github.com/onsi/ginkgo/ginkgo/outline_command.go new file mode 100644 index 000000000..96ca7ad27 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/ginkgo/outline_command.go @@ -0,0 +1,95 @@ +package main + +import ( + "encoding/json" + "flag" + "fmt" + "go/parser" + "go/token" + "os" + + "github.com/onsi/ginkgo/ginkgo/outline" +) + +const ( + // indentWidth is the width used by the 'indent' output + indentWidth = 4 + // stdinAlias is a portable alias for stdin. This convention is used in + // other CLIs, e.g., kubectl. + stdinAlias = "-" + usageCommand = "ginkgo outline <filename>" +) + +func BuildOutlineCommand() *Command { + const defaultFormat = "csv" + var format string + flagSet := flag.NewFlagSet("outline", flag.ExitOnError) + flagSet.StringVar(&format, "format", defaultFormat, "Format of outline. 
Accepted: 'csv', 'indent', 'json'") + return &Command{ + Name: "outline", + FlagSet: flagSet, + UsageCommand: usageCommand, + Usage: []string{ + "Create an outline of Ginkgo symbols for a file", + "To read from stdin, use: `ginkgo outline -`", + "Accepts the following flags:", + }, + Command: func(args []string, additionalArgs []string) { + outlineFile(args, format) + }, + } +} + +func outlineFile(args []string, format string) { + if len(args) != 1 { + println(fmt.Sprintf("usage: %s", usageCommand)) + os.Exit(1) + } + + filename := args[0] + var src *os.File + if filename == stdinAlias { + src = os.Stdin + } else { + var err error + src, err = os.Open(filename) + if err != nil { + println(fmt.Sprintf("error opening file: %s", err)) + os.Exit(1) + } + } + + fset := token.NewFileSet() + + parsedSrc, err := parser.ParseFile(fset, filename, src, 0) + if err != nil { + println(fmt.Sprintf("error parsing source: %s", err)) + os.Exit(1) + } + + o, err := outline.FromASTFile(fset, parsedSrc) + if err != nil { + println(fmt.Sprintf("error creating outline: %s", err)) + os.Exit(1) + } + + var oerr error + switch format { + case "csv": + _, oerr = fmt.Print(o) + case "indent": + _, oerr = fmt.Print(o.StringIndent(indentWidth)) + case "json": + b, err := json.Marshal(o) + if err != nil { + println(fmt.Sprintf("error marshalling to json: %s", err)) + } + _, oerr = fmt.Println(string(b)) + default: + complainAndQuit(fmt.Sprintf("format %s not accepted", format)) + } + if oerr != nil { + println(fmt.Sprintf("error writing outline: %s", oerr)) + os.Exit(1) + } +} diff --git a/vendor/github.com/onsi/ginkgo/go.mod b/vendor/github.com/onsi/ginkgo/go.mod index 1f7125228..655060cf7 100644 --- a/vendor/github.com/onsi/ginkgo/go.mod +++ b/vendor/github.com/onsi/ginkgo/go.mod @@ -4,8 +4,8 @@ require ( github.com/fsnotify/fsnotify v1.4.9 // indirect github.com/nxadm/tail v1.4.4 github.com/onsi/gomega v1.10.1 - golang.org/x/sys v0.0.0-20200519105757-fe76b779f299 - golang.org/x/text v0.3.2 // indirect + golang.org/x/sys v0.0.0-20210112080510-489259a85091 + golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e ) go 1.13 diff --git a/vendor/github.com/onsi/ginkgo/go.sum b/vendor/github.com/onsi/ginkgo/go.sum index 2b774f3e8..56a493f9d 100644 --- a/vendor/github.com/onsi/ginkgo/go.sum +++ b/vendor/github.com/onsi/ginkgo/go.sum @@ -1,8 +1,6 @@ -github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= -github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= @@ -15,39 +13,50 @@ github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMyw github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= github.com/hpcloud/tail 
v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/nxadm/tail v1.4.4 h1:DQuhQpB1tVlglWS2hLQ5OV6B5r8aGxSrPc5Qo6uTN78= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= -github.com/onsi/gomega v1.7.1 h1:K0jcRCwNQM3vFGh1ppMtDh/+7ApJrjldlX8fA0jDTLQ= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= github.com/onsi/gomega v1.10.1 h1:o0+MgICZLuZ7xjH7Vx6zS/zcu93/BEp1VwkIW1mEXCE= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/net v0.0.0-20180906233101-161cd47e91fd h1:nTDtHvHSdCn1m6ITfMRqtOd/9+7a3s8RBNOZ3eYZzJA= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7 h1:AeiKBIuRw3UomYXSbLy0Mc2dDLfdtbT/IVn4keq83P0= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f h1:wMNYb4v58l5UBM7MYRLPG6ZhfOqbKu7X5eyFl8ZhKvA= +golang.org/x/net v0.0.0-20201021035429-f5854403a974 h1:IX6qOQeG5uLjB/hjjwjedwfjND0hgjPMMyO1RoIXQNI= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e h1:N7DeIrjYszNmSW409R3frPPwglRwMkXSBzwVbkOjLLA= golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200519105757-fe76b779f299 h1:DYfZAGf2WMFjMxbgTjaC+2HC7NkNAQs+6Q8b9WEB/F4= -golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 
-golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210112080510-489259a85091 h1:DMyOG0U+gKfu8JZzg2UQe9MeaC1X+xQWlAKcRnjxjCw= +golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= -golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e h1:4nW4NLDYnU28ojHaHO8OVxFHk/aQ33U01a9cjED+pzE= +golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -57,11 +66,9 @@ google.golang.org/protobuf v1.23.0 h1:4MY060fB1DLGMB/7MBTLnwQUY6+F09GEiz6SsrNqyz google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= -gopkg.in/yaml.v2 v2.2.4 h1:/eiJrUcujPVeJ3xlSWaiNi3uSVmDGBK1pDHUHAnao1I= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU= gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_darwin.go b/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_darwin.go deleted file mode 100644 index e3d09eadb..000000000 --- 
a/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_darwin.go +++ /dev/null @@ -1,11 +0,0 @@ -// +build darwin - -package remote - -import ( - "golang.org/x/sys/unix" -) - -func interceptorDupx(oldfd int, newfd int) { - unix.Dup2(oldfd, newfd) -} diff --git a/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_dragonfly.go b/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_dragonfly.go deleted file mode 100644 index 72d38686a..000000000 --- a/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_dragonfly.go +++ /dev/null @@ -1,11 +0,0 @@ -// +build dragonfly - -package remote - -import ( - "golang.org/x/sys/unix" -) - -func interceptorDupx(oldfd int, newfd int) { - unix.Dup2(oldfd, newfd) -} diff --git a/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_freebsd.go b/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_freebsd.go deleted file mode 100644 index 497d548d9..000000000 --- a/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_freebsd.go +++ /dev/null @@ -1,11 +0,0 @@ -// +build freebsd - -package remote - -import ( - "golang.org/x/sys/unix" -) - -func interceptorDupx(oldfd int, newfd int) { - unix.Dup2(oldfd, newfd) -} diff --git a/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_linux.go b/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_linux.go deleted file mode 100644 index 29add0d33..000000000 --- a/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_linux.go +++ /dev/null @@ -1,12 +0,0 @@ -// +build linux -// +build !mips64le - -package remote - -import ( - "golang.org/x/sys/unix" -) - -func interceptorDupx(oldfd int, newfd int) { - unix.Dup2(oldfd, newfd) -} diff --git a/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_linux_mips64le.go b/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_linux_mips64le.go deleted file mode 100644 index 09bd06260..000000000 --- a/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_linux_mips64le.go +++ /dev/null @@ -1,12 +0,0 @@ -// +build linux -// +build mips64le - -package remote - -import ( - "golang.org/x/sys/unix" -) - -func interceptorDupx(oldfd int, newfd int) { - unix.Dup3(oldfd, newfd, 0) -} diff --git a/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_netbsd.go b/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_netbsd.go deleted file mode 100644 index 16ad6aeb2..000000000 --- a/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_netbsd.go +++ /dev/null @@ -1,11 +0,0 @@ -// +build netbsd - -package remote - -import ( - "golang.org/x/sys/unix" -) - -func interceptorDupx(oldfd int, newfd int) { - unix.Dup2(oldfd, newfd) -} diff --git a/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_openbsd.go b/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_openbsd.go deleted file mode 100644 index 4275f8421..000000000 --- a/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_openbsd.go +++ /dev/null @@ -1,11 +0,0 @@ -// +build openbsd - -package remote - -import ( - "golang.org/x/sys/unix" -) - -func interceptorDupx(oldfd int, newfd int) { - unix.Dup2(oldfd, newfd) -} diff --git a/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_solaris.go b/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_solaris.go deleted file mode 100644 index 882a38a9e..000000000 --- a/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_solaris.go +++ 
/dev/null @@ -1,11 +0,0 @@ -// +build solaris - -package remote - -import ( - "golang.org/x/sys/unix" -) - -func interceptorDupx(oldfd int, newfd int) { - unix.Dup2(oldfd, newfd) -} diff --git a/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_unix.go b/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_unix.go index 80614d0ce..774967db6 100644 --- a/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_unix.go +++ b/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_unix.go @@ -8,6 +8,7 @@ import ( "os" "github.com/nxadm/tail" + "golang.org/x/sys/unix" ) func NewOutputInterceptor() OutputInterceptor { @@ -35,8 +36,10 @@ func (interceptor *outputInterceptor) StartInterceptingOutput() error { return err } - interceptorDupx(int(interceptor.redirectFile.Fd()), 1) - interceptorDupx(int(interceptor.redirectFile.Fd()), 2) + // This might call Dup3 if the dup2 syscall is not available, e.g. on + // linux/arm64 or linux/riscv64 + unix.Dup2(int(interceptor.redirectFile.Fd()), 1) + unix.Dup2(int(interceptor.redirectFile.Fd()), 2) if interceptor.streamTarget != nil { interceptor.tailer, _ = tail.TailFile(interceptor.redirectFile.Name(), tail.Config{Follow: true}) diff --git a/vendor/github.com/onsi/ginkgo/internal/spec/specs.go b/vendor/github.com/onsi/ginkgo/internal/spec/specs.go index 8a2007137..0a24139fb 100644 --- a/vendor/github.com/onsi/ginkgo/internal/spec/specs.go +++ b/vendor/github.com/onsi/ginkgo/internal/spec/specs.go @@ -4,6 +4,7 @@ import ( "math/rand" "regexp" "sort" + "strings" ) type Specs struct { @@ -46,11 +47,11 @@ func (e *Specs) Shuffle(r *rand.Rand) { e.names = names } -func (e *Specs) ApplyFocus(description string, focusString string, skipString string) { - if focusString == "" && skipString == "" { +func (e *Specs) ApplyFocus(description string, focus, skip []string) { + if len(focus)+len(skip) == 0 { e.applyProgrammaticFocus() } else { - e.applyRegExpFocusAndSkip(description, focusString, skipString) + e.applyRegExpFocusAndSkip(description, focus, skip) } } @@ -90,14 +91,13 @@ func (e *Specs) toMatch(description string, i int) []byte { } } -func (e *Specs) applyRegExpFocusAndSkip(description string, focusString string, skipString string) { - var focusFilter *regexp.Regexp - if focusString != "" { - focusFilter = regexp.MustCompile(focusString) +func (e *Specs) applyRegExpFocusAndSkip(description string, focus, skip []string) { + var focusFilter, skipFilter *regexp.Regexp + if len(focus) > 0 { + focusFilter = regexp.MustCompile(strings.Join(focus, "|")) } - var skipFilter *regexp.Regexp - if skipString != "" { - skipFilter = regexp.MustCompile(skipString) + if len(skip) > 0 { + skipFilter = regexp.MustCompile(strings.Join(skip, "|")) } for i, spec := range e.specs { diff --git a/vendor/github.com/onsi/ginkgo/internal/suite/suite.go b/vendor/github.com/onsi/ginkgo/internal/suite/suite.go index e75da1f89..b4a83c432 100644 --- a/vendor/github.com/onsi/ginkgo/internal/suite/suite.go +++ b/vendor/github.com/onsi/ginkgo/internal/suite/suite.go @@ -97,7 +97,7 @@ func (suite *Suite) generateSpecsIterator(description string, config config.Gink specs.Shuffle(rand.New(rand.NewSource(config.RandomSeed))) } - specs.ApplyFocus(description, config.FocusString, config.SkipString) + specs.ApplyFocus(description, config.FocusStrings, config.SkipStrings) if config.SkipMeasurements { specs.SkipMeasurements() diff --git a/vendor/github.com/ulikunitz/xz/SECURITY.md b/vendor/github.com/ulikunitz/xz/SECURITY.md new file mode 100644 index 
000000000..5f7ec01b3 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/SECURITY.md @@ -0,0 +1,10 @@ +# Security Policy + +## Supported Versions + +Currently the last minor version v0.5.x is supported. + +## Reporting a Vulnerability + +Report a vulnerability by creating a Github issue at +<https://github.com/ulikunitz/xz/issues>. Expect a response in a week. diff --git a/vendor/github.com/ulikunitz/xz/TODO.md b/vendor/github.com/ulikunitz/xz/TODO.md index 84bd5dcbd..88c7341c8 100644 --- a/vendor/github.com/ulikunitz/xz/TODO.md +++ b/vendor/github.com/ulikunitz/xz/TODO.md @@ -8,19 +8,17 @@ 1. Review encoder and check for lzma improvements under xz. 2. Fix binary tree matcher. -3. Compare compression ratio with xz tool using comparable parameters - and optimize parameters -4. Do some optimizations - - rename operation action and make it a simple type of size 8 - - make maxMatches, wordSize parameters - - stop searching after a certain length is found (parameter sweetLen) +3. Compare compression ratio with xz tool using comparable parameters and optimize parameters +4. rename operation action and make it a simple type of size 8 +5. make maxMatches, wordSize parameters +6. stop searching after a certain length is found (parameter sweetLen) ## Release v0.7 1. Optimize code 2. Do statistical analysis to get linear presets. 3. Test sync.Pool compatability for xz and lzma Writer and Reader -3. Fuzz optimized code. +4. Fuzz optimized code. ## Release v0.8 @@ -44,53 +42,73 @@ ## Package lzma -### Release v0.6 - -- Rewrite Encoder into a simple greedy one-op-at-a-time encoder - including - + simple scan at the dictionary head for the same byte - + use the killer byte (requiring matches to get longer, the first - test should be the byte that would make the match longer) +### v0.6 +* Rewrite Encoder into a simple greedy one-op-at-a-time encoder including + * simple scan at the dictionary head for the same byte + * use the killer byte (requiring matches to get longer, the first test should be the byte that would make the match longer) ## Optimizations -- There may be a lot of false sharing in lzma.State; check whether this - can be improved by reorganizing the internal structure of it. -- Check whether batching encoding and decoding improves speed. +* There may be a lot of false sharing in lzma. State; check whether this can be improved by reorganizing the internal structure of it. + +* Check whether batching encoding and decoding improves speed. ### DAG optimizations -- Use full buffer to create minimal bit-length above range encoder. -- Might be too slow (see v0.4) +* Use full buffer to create minimal bit-length above range encoder. +* Might be too slow (see v0.4) ### Different match finders -- hashes with 2, 3 characters additional to 4 characters -- binary trees with 2-7 characters (uint64 as key, use uint32 as +* hashes with 2, 3 characters additional to 4 characters +* binary trees with 2-7 characters (uint64 as key, use uint32 as + pointers into a an array) -- rb-trees with 2-7 characters (uint64 as key, use uint32 as pointers + +* rb-trees with 2-7 characters (uint64 as key, use uint32 as pointers + into an array with bit-steeling for the colors) ## Release Procedure -- execute goch -l for all packages; probably with lower param like 0.5. -- check orthography with gospell -- Write release notes in doc/relnotes. -- Update README.md -- xb copyright . in xz directory to ensure all new files have Copyright - header -- VERSION=<version> go generate github.com/ulikunitz/xz/... 
to update - version files -- Execute test for Linux/amd64, Linux/x86 and Windows/amd64. -- Update TODO.md - write short log entry -- git checkout master && git merge dev -- git tag -a <version> -- git push +* execute goch -l for all packages; probably with lower param like 0.5. +* check orthography with gospell +* Write release notes in doc/relnotes. +* Update README.md +* xb copyright . in xz directory to ensure all new files have Copyright header +* `VERSION=<version> go generate github.com/ulikunitz/xz/...` to update version files +* Execute test for Linux/amd64, Linux/x86 and Windows/amd64. +* Update TODO.md - write short log entry +* `git checkout master && git merge dev` +* `git tag -a <version>` +* `git push` ## Log -## 2020-08-19 +### 2020-12-17 + +Release v0.5.9 fixes warnings, a typo and adds SECURITY.md. + +One fix is interesting. + +```go +const ( + a byte = 0x1 + b = 0x2 +) +``` + +The constants a and b don't have the same type. Correct is + +```go +const ( + a byte = 0x1 + b byte = 0x2 +) +``` + +### 2020-08-19 Release v0.5.8 fixes issue [issue #35](https://github.com/ulikunitz/xz/issues/35). @@ -208,8 +226,8 @@ MININT. ### 2015-06-04 -It has been a productive day. I improved the interface of lzma.Reader -and lzma.Writer and fixed the error handling. +It has been a productive day. I improved the interface of lzma. Reader +and lzma. Writer and fixed the error handling. ### 2015-06-01 @@ -260,7 +278,7 @@ needed anymore. However I will implement a ReaderState and WriterState type to use static typing to ensure the right State object is combined with the -right lzbase.Reader and lzbase.Writer. +right lzbase. Reader and lzbase. Writer. As a start I have implemented ReaderState and WriterState to ensure that the state for reading is only used by readers and WriterState only @@ -282,11 +300,11 @@ old lzma package has been completely removed. ### 2015-04-05 -Implemented lzma.Reader and tested it. +Implemented lzma. Reader and tested it. ### 2015-04-04 -Implemented baseReader by adapting code form lzma.Reader. +Implemented baseReader by adapting code form lzma. Reader. ### 2015-04-03 @@ -302,7 +320,7 @@ However in Francesco Campoy's presentation "Go for Javaneros (Javaïstes?)" is the the idea that using an embedded field E, all the methods of E will be defined on T. If E is an interface T satisfies E. -https://talks.golang.org/2014/go4java.slide#51 +<https://talks.golang.org/2014/go4java.slide#51> I have never used this, but it seems to be a cool idea. @@ -327,11 +345,11 @@ and the opCodec. 1. Implemented simple lzmago tool 2. Tested tool against large 4.4G file - - compression worked correctly; tested decompression with lzma - - decompression hits a full buffer condition + * compression worked correctly; tested decompression with lzma + * decompression hits a full buffer condition 3. Fixed a bug in the compressor and wrote a test for it 4. Executed full cycle for 4.4 GB file; performance can be improved ;-) ### 2015-01-11 -- Release v0.2 because of the working LZMA encoder and decoder +* Release v0.2 because of the working LZMA encoder and decoder diff --git a/vendor/github.com/ulikunitz/xz/format.go b/vendor/github.com/ulikunitz/xz/format.go index edfec9a94..84b58c9dd 100644 --- a/vendor/github.com/ulikunitz/xz/format.go +++ b/vendor/github.com/ulikunitz/xz/format.go @@ -47,9 +47,9 @@ const HeaderLen = 12 // Constants for the checksum methods supported by xz. 
const ( None byte = 0x0 - CRC32 = 0x1 - CRC64 = 0x4 - SHA256 = 0xa + CRC32 byte = 0x1 + CRC64 byte = 0x4 + SHA256 byte = 0xa ) // errInvalidFlags indicates that flags are invalid. @@ -569,22 +569,6 @@ func readFilters(r io.Reader, count int) (filters []filter, err error) { return []filter{f}, err } -// writeFilters writes the filters. -func writeFilters(w io.Writer, filters []filter) (n int, err error) { - for _, f := range filters { - p, err := f.MarshalBinary() - if err != nil { - return n, err - } - k, err := w.Write(p) - n += k - if err != nil { - return n, err - } - } - return n, nil -} - /*** Index ***/ // record describes a block in the xz file index. diff --git a/vendor/github.com/ulikunitz/xz/lzma/bintree.go b/vendor/github.com/ulikunitz/xz/lzma/bintree.go index 58d6a92a7..527ea19a7 100644 --- a/vendor/github.com/ulikunitz/xz/lzma/bintree.go +++ b/vendor/github.com/ulikunitz/xz/lzma/bintree.go @@ -5,10 +5,7 @@ package lzma import ( - "bufio" "errors" - "fmt" - "io" "unicode" ) @@ -349,6 +346,7 @@ func dumpX(x uint32) string { return string(a) } +/* // dumpNode writes a representation of the node v into the io.Writer. func (t *binTree) dumpNode(w io.Writer, v uint32, indent int) { if v == null { @@ -377,6 +375,7 @@ func (t *binTree) dump(w io.Writer) error { t.dumpNode(bw, t.root, 0) return bw.Flush() } +*/ func (t *binTree) distance(v uint32) int { dist := int(t.front) - int(v) diff --git a/vendor/github.com/ulikunitz/xz/lzma/bitops.go b/vendor/github.com/ulikunitz/xz/lzma/bitops.go index 2784ec6ba..d4309f97e 100644 --- a/vendor/github.com/ulikunitz/xz/lzma/bitops.go +++ b/vendor/github.com/ulikunitz/xz/lzma/bitops.go @@ -18,6 +18,7 @@ var ntz32Table = [32]int8{ 30, 17, 8, 14, 29, 13, 28, 27, } +/* // ntz32 computes the number of trailing zeros for an unsigned 32-bit integer. func ntz32(x uint32) int { if x == 0 { @@ -26,6 +27,7 @@ func ntz32(x uint32) int { x = (x & -x) * ntz32Const return int(ntz32Table[x>>27]) } +*/ // nlz32 computes the number of leading zeros for an unsigned 32-bit integer. func nlz32(x uint32) int { diff --git a/vendor/github.com/ulikunitz/xz/lzma/decoder.go b/vendor/github.com/ulikunitz/xz/lzma/decoder.go index e5a760a50..4b820792a 100644 --- a/vendor/github.com/ulikunitz/xz/lzma/decoder.go +++ b/vendor/github.com/ulikunitz/xz/lzma/decoder.go @@ -200,7 +200,7 @@ func (d *decoder) decompress() error { op, err := d.readOp() switch err { case nil: - break + // break case errEOS: d.eos = true if !d.rd.possiblyAtEnd() { diff --git a/vendor/github.com/ulikunitz/xz/lzma/decoderdict.go b/vendor/github.com/ulikunitz/xz/lzma/decoderdict.go index ba06712b0..dd44e6625 100644 --- a/vendor/github.com/ulikunitz/xz/lzma/decoderdict.go +++ b/vendor/github.com/ulikunitz/xz/lzma/decoderdict.go @@ -126,10 +126,3 @@ func (d *decoderDict) Available() int { return d.buf.Available() } // Read reads data from the buffer contained in the decoder dictionary. func (d *decoderDict) Read(p []byte) (n int, err error) { return d.buf.Read(p) } - -// Buffered returns the number of bytes currently buffered in the -// decoder dictionary. -func (d *decoderDict) buffered() int { return d.buf.Buffered() } - -// Peek gets data from the buffer without advancing the rear index. 
-func (d *decoderDict) peek(p []byte) (n int, err error) { return d.buf.Peek(p) } diff --git a/vendor/github.com/ulikunitz/xz/lzma/directcodec.go b/vendor/github.com/ulikunitz/xz/lzma/directcodec.go index e6e0c6ddf..064642831 100644 --- a/vendor/github.com/ulikunitz/xz/lzma/directcodec.go +++ b/vendor/github.com/ulikunitz/xz/lzma/directcodec.go @@ -4,21 +4,10 @@ package lzma -import "fmt" - // directCodec allows the encoding and decoding of values with a fixed number // of bits. The number of bits must be in the range [1,32]. type directCodec byte -// makeDirectCodec creates a directCodec. The function panics if the number of -// bits is not in the range [1,32]. -func makeDirectCodec(bits int) directCodec { - if !(1 <= bits && bits <= 32) { - panic(fmt.Errorf("bits=%d out of range", bits)) - } - return directCodec(bits) -} - // Bits returns the number of bits supported by this codec. func (dc directCodec) Bits() int { return int(dc) diff --git a/vendor/github.com/ulikunitz/xz/lzma/distcodec.go b/vendor/github.com/ulikunitz/xz/lzma/distcodec.go index 69871c04a..9ed486d27 100644 --- a/vendor/github.com/ulikunitz/xz/lzma/distcodec.go +++ b/vendor/github.com/ulikunitz/xz/lzma/distcodec.go @@ -20,8 +20,6 @@ const ( posSlotBits = 6 // number of align bits alignBits = 4 - // maximum position slot - maxPosSlot = 63 ) // distCodec provides encoding and decoding of distance values. @@ -45,20 +43,6 @@ func (dc *distCodec) deepcopy(src *distCodec) { dc.alignCodec.deepcopy(&src.alignCodec) } -// distBits returns the number of bits required to encode dist. -func distBits(dist uint32) int { - if dist < startPosModel { - return 6 - } - // slot s > 3, dist d - // s = 2(bits(d)-1) + bit(d, bits(d)-2) - // s>>1 = bits(d)-1 - // bits(d) = 32-nlz32(d) - // s>>1=31-nlz32(d) - // n = 5 + (s>>1) = 36 - nlz32(d) - return 36 - nlz32(dist) -} - // newDistCodec creates a new distance codec. func (dc *distCodec) init() { for i := range dc.posSlotCodecs { diff --git a/vendor/github.com/ulikunitz/xz/lzma/encoderdict.go b/vendor/github.com/ulikunitz/xz/lzma/encoderdict.go index 40f3d3f64..c36308d7c 100644 --- a/vendor/github.com/ulikunitz/xz/lzma/encoderdict.go +++ b/vendor/github.com/ulikunitz/xz/lzma/encoderdict.go @@ -19,7 +19,7 @@ type matcher interface { } // encoderDict provides the dictionary of the encoder. It includes an -// addtional buffer atop of the actual dictionary. +// additional buffer atop of the actual dictionary. type encoderDict struct { buf buffer m matcher diff --git a/vendor/github.com/ulikunitz/xz/lzma/header2.go b/vendor/github.com/ulikunitz/xz/lzma/header2.go index cd148812c..ffeca35c3 100644 --- a/vendor/github.com/ulikunitz/xz/lzma/header2.go +++ b/vendor/github.com/ulikunitz/xz/lzma/header2.go @@ -264,7 +264,7 @@ type chunkState byte // state const ( start chunkState = 'S' - stop = 'T' + stop chunkState = 'T' ) // errors for the chunk state handling diff --git a/vendor/github.com/ulikunitz/xz/lzma/lengthcodec.go b/vendor/github.com/ulikunitz/xz/lzma/lengthcodec.go index 927395bd8..35b064064 100644 --- a/vendor/github.com/ulikunitz/xz/lzma/lengthcodec.go +++ b/vendor/github.com/ulikunitz/xz/lzma/lengthcodec.go @@ -56,19 +56,6 @@ func (lc *lengthCodec) init() { lc.high = makeTreeCodec(8) } -// lBits gives the number of bits used for the encoding of the l value -// provided to the range encoder. -func lBits(l uint32) int { - switch { - case l < 8: - return 4 - case l < 16: - return 5 - default: - return 10 - } -} - // Encode encodes the length offset. 
The length offset l can be compute by // subtracting minMatchLen (2) from the actual length. // diff --git a/vendor/github.com/ulikunitz/xz/lzma/literalcodec.go b/vendor/github.com/ulikunitz/xz/lzma/literalcodec.go index ca31530fd..7b1ad1d9b 100644 --- a/vendor/github.com/ulikunitz/xz/lzma/literalcodec.go +++ b/vendor/github.com/ulikunitz/xz/lzma/literalcodec.go @@ -123,10 +123,3 @@ const ( minLP = 0 maxLP = 4 ) - -// minState and maxState define a range for the state values stored in -// the State values. -const ( - minState = 0 - maxState = 11 -) diff --git a/vendor/github.com/ulikunitz/xz/lzma/operation.go b/vendor/github.com/ulikunitz/xz/lzma/operation.go index a75c9b46c..2f9b78ea5 100644 --- a/vendor/github.com/ulikunitz/xz/lzma/operation.go +++ b/vendor/github.com/ulikunitz/xz/lzma/operation.go @@ -5,7 +5,6 @@ package lzma import ( - "errors" "fmt" "unicode" ) @@ -24,30 +23,6 @@ type match struct { n int } -// verify checks whether the match is valid. If that is not the case an -// error is returned. -func (m match) verify() error { - if !(minDistance <= m.distance && m.distance <= maxDistance) { - return errors.New("distance out of range") - } - if !(1 <= m.n && m.n <= maxMatchLen) { - return errors.New("length out of range") - } - return nil -} - -// l return the l-value for the match, which is the difference of length -// n and 2. -func (m match) l() uint32 { - return uint32(m.n - minMatchLen) -} - -// dist returns the dist value for the match, which is one less of the -// distance stored in the match. -func (m match) dist() uint32 { - return uint32(m.distance - minDistance) -} - // Len returns the number of bytes matched. func (m match) Len() int { return m.n diff --git a/vendor/github.com/ulikunitz/xz/lzma/rangecodec.go b/vendor/github.com/ulikunitz/xz/lzma/rangecodec.go index 7189a0377..7b299abfe 100644 --- a/vendor/github.com/ulikunitz/xz/lzma/rangecodec.go +++ b/vendor/github.com/ulikunitz/xz/lzma/rangecodec.go @@ -131,32 +131,6 @@ type rangeDecoder struct { code uint32 } -// init initializes the range decoder, by reading from the byte reader. -func (d *rangeDecoder) init() error { - d.nrange = 0xffffffff - d.code = 0 - - b, err := d.br.ReadByte() - if err != nil { - return err - } - if b != 0 { - return errors.New("newRangeDecoder: first byte not zero") - } - - for i := 0; i < 4; i++ { - if err = d.updateCode(); err != nil { - return err - } - } - - if d.code >= d.nrange { - return errors.New("newRangeDecoder: d.code >= d.nrange") - } - - return nil -} - // newRangeDecoder initializes a range decoder. It reads five bytes from the // reader and therefore may return an error. func newRangeDecoder(br io.ByteReader) (d *rangeDecoder, err error) { diff --git a/vendor/github.com/ulikunitz/xz/lzma/reader2.go b/vendor/github.com/ulikunitz/xz/lzma/reader2.go index 33074e624..e34c23f9c 100644 --- a/vendor/github.com/ulikunitz/xz/lzma/reader2.go +++ b/vendor/github.com/ulikunitz/xz/lzma/reader2.go @@ -48,7 +48,6 @@ type Reader2 struct { chunkReader io.Reader cstate chunkState - ctype chunkType } // NewReader2 creates a reader for an LZMA2 chunk sequence. diff --git a/vendor/github.com/ulikunitz/xz/lzma/state.go b/vendor/github.com/ulikunitz/xz/lzma/state.go index 03f061cf1..fbe3a3942 100644 --- a/vendor/github.com/ulikunitz/xz/lzma/state.go +++ b/vendor/github.com/ulikunitz/xz/lzma/state.go @@ -53,12 +53,6 @@ func (s *state) Reset() { s.distCodec.init() } -// initState initializes the state. 
-func initState(s *state, p Properties) { - *s = state{Properties: p} - s.Reset() -} - // newState creates a new state from the give Properties. func newState(p Properties) *state { s := &state{Properties: p} diff --git a/vendor/github.com/ulikunitz/xz/reader.go b/vendor/github.com/ulikunitz/xz/reader.go index 22cd6d500..795858914 100644 --- a/vendor/github.com/ulikunitz/xz/reader.go +++ b/vendor/github.com/ulikunitz/xz/reader.go @@ -26,13 +26,6 @@ type ReaderConfig struct { SingleStream bool } -// fill replaces all zero values with their default values. -func (c *ReaderConfig) fill() { - if c.DictCap == 0 { - c.DictCap = 8 * 1024 * 1024 - } -} - // Verify checks the reader parameters for Validity. Zero values will be // replaced by default values. func (c *ReaderConfig) Verify() error { @@ -165,9 +158,6 @@ func (c ReaderConfig) newStreamReader(xz io.Reader) (r *streamReader, err error) return r, nil } -// errIndex indicates an error with the xz file index. -var errIndex = errors.New("xz: error in xz file index") - // readTail reads the index body and the xz footer. func (r *streamReader) readTail() error { index, n, err := readIndexBody(r.xz) @@ -265,7 +255,6 @@ type blockReader struct { n int64 hash hash.Hash r io.Reader - err error } // newBlockReader creates a new block reader. @@ -315,10 +304,6 @@ func (br *blockReader) record() record { return record{br.unpaddedSize(), br.uncompressedSize()} } -// errBlockSize indicates that the size of the block in the block header -// is wrong. -var errBlockSize = errors.New("xz: wrong uncompressed size for block") - // Read reads data from the block. func (br *blockReader) Read(p []byte) (n int, err error) { n, err = br.r.Read(p) diff --git a/vendor/github.com/ulikunitz/xz/writer.go b/vendor/github.com/ulikunitz/xz/writer.go index aec10dfa6..a9ed44912 100644 --- a/vendor/github.com/ulikunitz/xz/writer.go +++ b/vendor/github.com/ulikunitz/xz/writer.go @@ -6,6 +6,7 @@ package xz import ( "errors" + "fmt" "hash" "io" @@ -190,6 +191,9 @@ func (c WriterConfig) NewWriter(xz io.Writer) (w *Writer, err error) { return nil, err } data, err := w.h.MarshalBinary() + if err != nil { + return nil, fmt.Errorf("w.h.MarshalBinary(): error %w", err) + } if _, err = xz.Write(data); err != nil { return nil, err } diff --git a/vendor/github.com/vbauerster/mpb/v5/.travis.yml b/vendor/github.com/vbauerster/mpb/v5/.travis.yml index 0eb0f2f20..9a203a67d 100644 --- a/vendor/github.com/vbauerster/mpb/v5/.travis.yml +++ b/vendor/github.com/vbauerster/mpb/v5/.travis.yml @@ -1,4 +1,7 @@ language: go +arch: + - amd64 + - ppc64le go: - 1.14.x diff --git a/vendor/github.com/vbauerster/mpb/v5/bar_option.go b/vendor/github.com/vbauerster/mpb/v5/bar_option.go index 31b7939b0..e7d2e41f9 100644 --- a/vendor/github.com/vbauerster/mpb/v5/bar_option.go +++ b/vendor/github.com/vbauerster/mpb/v5/bar_option.go @@ -123,13 +123,20 @@ func makeExtFunc(filler BarFiller) extFunc { } } -// TrimSpace trims bar's edge spaces. -func TrimSpace() BarOption { +// BarFillerTrim bar filler is rendered with leading and trailing space +// like ' [===] ' by default. With this option leading and trailing +// space will be removed. +func BarFillerTrim() BarOption { return func(s *bState) { s.trimSpace = true } } +// TrimSpace is an alias to BarFillerTrim. +func TrimSpace() BarOption { + return BarFillerTrim() +} + // BarStyle overrides mpb.DefaultBarStyle which is "[=>-]<+". 
// It's ok to pass string containing just 5 runes, for example "╢▌▌░╟", // if you don't need to override '<' (reverse tip) and '+' (refill rune). diff --git a/vendor/github.com/vbauerster/mpb/v5/go.mod b/vendor/github.com/vbauerster/mpb/v5/go.mod index 642bf0a5a..e80d1a10d 100644 --- a/vendor/github.com/vbauerster/mpb/v5/go.mod +++ b/vendor/github.com/vbauerster/mpb/v5/go.mod @@ -4,7 +4,7 @@ require ( github.com/VividCortex/ewma v1.1.1 github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d github.com/mattn/go-runewidth v0.0.9 - golang.org/x/sys v0.0.0-20200810151505-1b9f1253b3ed + golang.org/x/sys v0.0.0-20201218084310-7d0127a74742 ) go 1.14 diff --git a/vendor/github.com/vbauerster/mpb/v5/go.sum b/vendor/github.com/vbauerster/mpb/v5/go.sum index 7ad08f141..62cc10af0 100644 --- a/vendor/github.com/vbauerster/mpb/v5/go.sum +++ b/vendor/github.com/vbauerster/mpb/v5/go.sum @@ -4,5 +4,5 @@ github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d h1:licZJFw2RwpH github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d/go.mod h1:asat636LX7Bqt5lYEZ27JNDcqxfjdBQuJ/MM4CN/Lzo= github.com/mattn/go-runewidth v0.0.9 h1:Lm995f3rfxdpd6TSmuVCHVb/QhupuXlYr8sCI/QdE+0= github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= -golang.org/x/sys v0.0.0-20200810151505-1b9f1253b3ed h1:WBkVNH1zd9jg/dK4HCM4lNANnmd12EHC9z+LmcCG4ns= -golang.org/x/sys v0.0.0-20200810151505-1b9f1253b3ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201218084310-7d0127a74742 h1:+CBz4km/0KPU3RGTwARGh/noP3bEwtHcq+0YcBQM2JQ= +golang.org/x/sys v0.0.0-20201218084310-7d0127a74742/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= diff --git a/vendor/github.com/vbauerster/mpb/v5/progress.go b/vendor/github.com/vbauerster/mpb/v5/progress.go index ac1ce50ab..fb66ce05d 100644 --- a/vendor/github.com/vbauerster/mpb/v5/progress.go +++ b/vendor/github.com/vbauerster/mpb/v5/progress.go @@ -8,6 +8,7 @@ import ( "io" "io/ioutil" "log" + "math" "os" "sync" "time" @@ -40,7 +41,6 @@ type pState struct { pMatrix map[int][]chan int aMatrix map[int][]chan int barShutdownQueue []*Bar - barPopQueue []*Bar // following are provided/overrided by user idCount int @@ -179,7 +179,7 @@ func (p *Progress) BarCount() int { } } -// Wait waits far all bars to complete and finally shutdowns container. +// Wait waits for all bars to complete and finally shutdowns container. // After this method has been called, there is no way to reuse *Progress // instance. 
func (p *Progress) Wait() { @@ -301,27 +301,18 @@ func (s *pState) flush(cw *cwriter.Writer) error { delete(s.parkedBars, b) b.toDrop = true } + if s.popCompleted && !b.noPop { + lineCount -= b.extendedLines + 1 + b.toDrop = true + } if b.toDrop { delete(bm, b) s.heapUpdated = true - } else if s.popCompleted { - if b := b; !b.noPop { - defer func() { - s.barPopQueue = append(s.barPopQueue, b) - }() - } } b.cancel() } s.barShutdownQueue = s.barShutdownQueue[0:0] - for _, b := range s.barPopQueue { - delete(bm, b) - s.heapUpdated = true - lineCount -= b.extendedLines + 1 - } - s.barPopQueue = s.barPopQueue[0:0] - for b := range bm { heap.Push(&s.bHeap, b) } @@ -370,7 +361,7 @@ func (s *pState) makeBarState(total int64, filler BarFiller, options ...BarOptio } if s.popCompleted && !bs.noPop { - bs.priority = -1 + bs.priority = -(math.MaxInt32 - s.idCount) } bs.bufP = bytes.NewBuffer(make([]byte, 0, 128)) @@ -382,17 +373,18 @@ func (s *pState) makeBarState(total int64, filler BarFiller, options ...BarOptio func syncWidth(matrix map[int][]chan int) { for _, column := range matrix { - column := column - go func() { - var maxWidth int - for _, ch := range column { - if w := <-ch; w > maxWidth { - maxWidth = w - } - } - for _, ch := range column { - ch <- maxWidth - } - }() + go maxWidthDistributor(column) + } +} + +var maxWidthDistributor = func(column []chan int) { + var maxWidth int + for _, ch := range column { + if w := <-ch; w > maxWidth { + maxWidth = w + } + } + for _, ch := range column { + ch <- maxWidth } } diff --git a/vendor/golang.org/x/tools/AUTHORS b/vendor/golang.org/x/tools/AUTHORS new file mode 100644 index 000000000..15167cd74 --- /dev/null +++ b/vendor/golang.org/x/tools/AUTHORS @@ -0,0 +1,3 @@ +# This source code refers to The Go Authors for copyright purposes. +# The master list of authors is in the main Go distribution, +# visible at http://tip.golang.org/AUTHORS. diff --git a/vendor/golang.org/x/tools/CONTRIBUTORS b/vendor/golang.org/x/tools/CONTRIBUTORS new file mode 100644 index 000000000..1c4577e96 --- /dev/null +++ b/vendor/golang.org/x/tools/CONTRIBUTORS @@ -0,0 +1,3 @@ +# This source code was written by the Go contributors. +# The master list of contributors is in the main Go distribution, +# visible at http://tip.golang.org/CONTRIBUTORS. diff --git a/vendor/golang.org/x/tools/LICENSE b/vendor/golang.org/x/tools/LICENSE new file mode 100644 index 000000000..6a66aea5e --- /dev/null +++ b/vendor/golang.org/x/tools/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/golang.org/x/tools/PATENTS b/vendor/golang.org/x/tools/PATENTS new file mode 100644 index 000000000..733099041 --- /dev/null +++ b/vendor/golang.org/x/tools/PATENTS @@ -0,0 +1,22 @@ +Additional IP Rights Grant (Patents) + +"This implementation" means the copyrightable works distributed by +Google as part of the Go project. + +Google hereby grants to You a perpetual, worldwide, non-exclusive, +no-charge, royalty-free, irrevocable (except as stated in this section) +patent license to make, have made, use, offer to sell, sell, import, +transfer and otherwise run, modify and propagate the contents of this +implementation of Go, where such license applies only to those patent +claims, both currently owned or controlled by Google and acquired in +the future, licensable by Google that are necessarily infringed by this +implementation of Go. This grant does not include claims that would be +infringed only as a consequence of further modification of this +implementation. If you or your agent or exclusive licensee institute or +order or agree to the institution of patent litigation against any +entity (including a cross-claim or counterclaim in a lawsuit) alleging +that this implementation of Go or any code incorporated within this +implementation of Go constitutes direct or contributory patent +infringement, or inducement of patent infringement, then any patent +rights granted to you under this License for this implementation of Go +shall terminate as of the date such litigation is filed. diff --git a/vendor/golang.org/x/tools/go/ast/inspector/inspector.go b/vendor/golang.org/x/tools/go/ast/inspector/inspector.go new file mode 100644 index 000000000..af5e17fee --- /dev/null +++ b/vendor/golang.org/x/tools/go/ast/inspector/inspector.go @@ -0,0 +1,186 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package inspector provides helper functions for traversal over the +// syntax trees of a package, including node filtering by type, and +// materialization of the traversal stack. +// +// During construction, the inspector does a complete traversal and +// builds a list of push/pop events and their node type. Subsequent +// method calls that request a traversal scan this list, rather than walk +// the AST, and perform type filtering using efficient bit sets. +// +// Experiments suggest the inspector's traversals are about 2.5x faster +// than ast.Inspect, but it may take around 5 traversals for this +// benefit to amortize the inspector's construction cost. +// If efficiency is the primary concern, do not use Inspector for +// one-off traversals. 
+package inspector + +// There are four orthogonal features in a traversal: +// 1 type filtering +// 2 pruning +// 3 postorder calls to f +// 4 stack +// Rather than offer all of them in the API, +// only a few combinations are exposed: +// - Preorder is the fastest and has fewest features, +// but is the most commonly needed traversal. +// - Nodes and WithStack both provide pruning and postorder calls, +// even though few clients need it, because supporting two versions +// is not justified. +// More combinations could be supported by expressing them as +// wrappers around a more generic traversal, but this was measured +// and found to degrade performance significantly (30%). + +import ( + "go/ast" +) + +// An Inspector provides methods for inspecting +// (traversing) the syntax trees of a package. +type Inspector struct { + events []event +} + +// New returns an Inspector for the specified syntax trees. +func New(files []*ast.File) *Inspector { + return &Inspector{traverse(files)} +} + +// An event represents a push or a pop +// of an ast.Node during a traversal. +type event struct { + node ast.Node + typ uint64 // typeOf(node) + index int // 1 + index of corresponding pop event, or 0 if this is a pop +} + +// Preorder visits all the nodes of the files supplied to New in +// depth-first order. It calls f(n) for each node n before it visits +// n's children. +// +// The types argument, if non-empty, enables type-based filtering of +// events. The function f if is called only for nodes whose type +// matches an element of the types slice. +func (in *Inspector) Preorder(types []ast.Node, f func(ast.Node)) { + // Because it avoids postorder calls to f, and the pruning + // check, Preorder is almost twice as fast as Nodes. The two + // features seem to contribute similar slowdowns (~1.4x each). + + mask := maskOf(types) + for i := 0; i < len(in.events); { + ev := in.events[i] + if ev.typ&mask != 0 { + if ev.index > 0 { + f(ev.node) + } + } + i++ + } +} + +// Nodes visits the nodes of the files supplied to New in depth-first +// order. It calls f(n, true) for each node n before it visits n's +// children. If f returns true, Nodes invokes f recursively for each +// of the non-nil children of the node, followed by a call of +// f(n, false). +// +// The types argument, if non-empty, enables type-based filtering of +// events. The function f if is called only for nodes whose type +// matches an element of the types slice. +func (in *Inspector) Nodes(types []ast.Node, f func(n ast.Node, push bool) (proceed bool)) { + mask := maskOf(types) + for i := 0; i < len(in.events); { + ev := in.events[i] + if ev.typ&mask != 0 { + if ev.index > 0 { + // push + if !f(ev.node, true) { + i = ev.index // jump to corresponding pop + 1 + continue + } + } else { + // pop + f(ev.node, false) + } + } + i++ + } +} + +// WithStack visits nodes in a similar manner to Nodes, but it +// supplies each call to f an additional argument, the current +// traversal stack. The stack's first element is the outermost node, +// an *ast.File; its last is the innermost, n. 
+func (in *Inspector) WithStack(types []ast.Node, f func(n ast.Node, push bool, stack []ast.Node) (proceed bool)) { + mask := maskOf(types) + var stack []ast.Node + for i := 0; i < len(in.events); { + ev := in.events[i] + if ev.index > 0 { + // push + stack = append(stack, ev.node) + if ev.typ&mask != 0 { + if !f(ev.node, true, stack) { + i = ev.index + stack = stack[:len(stack)-1] + continue + } + } + } else { + // pop + if ev.typ&mask != 0 { + f(ev.node, false, stack) + } + stack = stack[:len(stack)-1] + } + i++ + } +} + +// traverse builds the table of events representing a traversal. +func traverse(files []*ast.File) []event { + // Preallocate approximate number of events + // based on source file extent. + // This makes traverse faster by 4x (!). + var extent int + for _, f := range files { + extent += int(f.End() - f.Pos()) + } + // This estimate is based on the net/http package. + capacity := extent * 33 / 100 + if capacity > 1e6 { + capacity = 1e6 // impose some reasonable maximum + } + events := make([]event, 0, capacity) + + var stack []event + for _, f := range files { + ast.Inspect(f, func(n ast.Node) bool { + if n != nil { + // push + ev := event{ + node: n, + typ: typeOf(n), + index: len(events), // push event temporarily holds own index + } + stack = append(stack, ev) + events = append(events, ev) + } else { + // pop + ev := stack[len(stack)-1] + stack = stack[:len(stack)-1] + + events[ev.index].index = len(events) + 1 // make push refer to pop + + ev.index = 0 // turn ev into a pop event + events = append(events, ev) + } + return true + }) + } + + return events +} diff --git a/vendor/golang.org/x/tools/go/ast/inspector/typeof.go b/vendor/golang.org/x/tools/go/ast/inspector/typeof.go new file mode 100644 index 000000000..d61301b13 --- /dev/null +++ b/vendor/golang.org/x/tools/go/ast/inspector/typeof.go @@ -0,0 +1,216 @@ +package inspector + +// This file defines func typeOf(ast.Node) uint64. +// +// The initial map-based implementation was too slow; +// see https://go-review.googlesource.com/c/tools/+/135655/1/go/ast/inspector/inspector.go#196 + +import "go/ast" + +const ( + nArrayType = iota + nAssignStmt + nBadDecl + nBadExpr + nBadStmt + nBasicLit + nBinaryExpr + nBlockStmt + nBranchStmt + nCallExpr + nCaseClause + nChanType + nCommClause + nComment + nCommentGroup + nCompositeLit + nDeclStmt + nDeferStmt + nEllipsis + nEmptyStmt + nExprStmt + nField + nFieldList + nFile + nForStmt + nFuncDecl + nFuncLit + nFuncType + nGenDecl + nGoStmt + nIdent + nIfStmt + nImportSpec + nIncDecStmt + nIndexExpr + nInterfaceType + nKeyValueExpr + nLabeledStmt + nMapType + nPackage + nParenExpr + nRangeStmt + nReturnStmt + nSelectStmt + nSelectorExpr + nSendStmt + nSliceExpr + nStarExpr + nStructType + nSwitchStmt + nTypeAssertExpr + nTypeSpec + nTypeSwitchStmt + nUnaryExpr + nValueSpec +) + +// typeOf returns a distinct single-bit value that represents the type of n. +// +// Various implementations were benchmarked with BenchmarkNewInspector: +// GOGC=off +// - type switch 4.9-5.5ms 2.1ms +// - binary search over a sorted list of types 5.5-5.9ms 2.5ms +// - linear scan, frequency-ordered list 5.9-6.1ms 2.7ms +// - linear scan, unordered list 6.4ms 2.7ms +// - hash table 6.5ms 3.1ms +// A perfect hash seemed like overkill. +// +// The compiler's switch statement is the clear winner +// as it produces a binary tree in code, +// with constant conditions and good branch prediction. +// (Sadly it is the most verbose in source code.) +// Binary search suffered from poor branch prediction. 
+// +func typeOf(n ast.Node) uint64 { + // Fast path: nearly half of all nodes are identifiers. + if _, ok := n.(*ast.Ident); ok { + return 1 << nIdent + } + + // These cases include all nodes encountered by ast.Inspect. + switch n.(type) { + case *ast.ArrayType: + return 1 << nArrayType + case *ast.AssignStmt: + return 1 << nAssignStmt + case *ast.BadDecl: + return 1 << nBadDecl + case *ast.BadExpr: + return 1 << nBadExpr + case *ast.BadStmt: + return 1 << nBadStmt + case *ast.BasicLit: + return 1 << nBasicLit + case *ast.BinaryExpr: + return 1 << nBinaryExpr + case *ast.BlockStmt: + return 1 << nBlockStmt + case *ast.BranchStmt: + return 1 << nBranchStmt + case *ast.CallExpr: + return 1 << nCallExpr + case *ast.CaseClause: + return 1 << nCaseClause + case *ast.ChanType: + return 1 << nChanType + case *ast.CommClause: + return 1 << nCommClause + case *ast.Comment: + return 1 << nComment + case *ast.CommentGroup: + return 1 << nCommentGroup + case *ast.CompositeLit: + return 1 << nCompositeLit + case *ast.DeclStmt: + return 1 << nDeclStmt + case *ast.DeferStmt: + return 1 << nDeferStmt + case *ast.Ellipsis: + return 1 << nEllipsis + case *ast.EmptyStmt: + return 1 << nEmptyStmt + case *ast.ExprStmt: + return 1 << nExprStmt + case *ast.Field: + return 1 << nField + case *ast.FieldList: + return 1 << nFieldList + case *ast.File: + return 1 << nFile + case *ast.ForStmt: + return 1 << nForStmt + case *ast.FuncDecl: + return 1 << nFuncDecl + case *ast.FuncLit: + return 1 << nFuncLit + case *ast.FuncType: + return 1 << nFuncType + case *ast.GenDecl: + return 1 << nGenDecl + case *ast.GoStmt: + return 1 << nGoStmt + case *ast.Ident: + return 1 << nIdent + case *ast.IfStmt: + return 1 << nIfStmt + case *ast.ImportSpec: + return 1 << nImportSpec + case *ast.IncDecStmt: + return 1 << nIncDecStmt + case *ast.IndexExpr: + return 1 << nIndexExpr + case *ast.InterfaceType: + return 1 << nInterfaceType + case *ast.KeyValueExpr: + return 1 << nKeyValueExpr + case *ast.LabeledStmt: + return 1 << nLabeledStmt + case *ast.MapType: + return 1 << nMapType + case *ast.Package: + return 1 << nPackage + case *ast.ParenExpr: + return 1 << nParenExpr + case *ast.RangeStmt: + return 1 << nRangeStmt + case *ast.ReturnStmt: + return 1 << nReturnStmt + case *ast.SelectStmt: + return 1 << nSelectStmt + case *ast.SelectorExpr: + return 1 << nSelectorExpr + case *ast.SendStmt: + return 1 << nSendStmt + case *ast.SliceExpr: + return 1 << nSliceExpr + case *ast.StarExpr: + return 1 << nStarExpr + case *ast.StructType: + return 1 << nStructType + case *ast.SwitchStmt: + return 1 << nSwitchStmt + case *ast.TypeAssertExpr: + return 1 << nTypeAssertExpr + case *ast.TypeSpec: + return 1 << nTypeSpec + case *ast.TypeSwitchStmt: + return 1 << nTypeSwitchStmt + case *ast.UnaryExpr: + return 1 << nUnaryExpr + case *ast.ValueSpec: + return 1 << nValueSpec + } + return 0 +} + +func maskOf(nodes []ast.Node) uint64 { + if nodes == nil { + return 1<<64 - 1 // match all node types + } + var mask uint64 + for _, n := range nodes { + mask |= typeOf(n) + } + return mask +} diff --git a/vendor/modules.txt b/vendor/modules.txt index 397ab70be..10d7b3fbf 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -70,7 +70,7 @@ github.com/containernetworking/plugins/pkg/utils/hwaddr github.com/containernetworking/plugins/pkg/utils/sysctl github.com/containernetworking/plugins/plugins/ipam/host-local/backend github.com/containernetworking/plugins/plugins/ipam/host-local/backend/allocator -# github.com/containers/buildah v1.19.2 +# 
github.com/containers/buildah v1.19.3 github.com/containers/buildah github.com/containers/buildah/bind github.com/containers/buildah/chroot @@ -108,7 +108,7 @@ github.com/containers/common/pkg/umask github.com/containers/common/version # github.com/containers/conmon v2.0.20+incompatible github.com/containers/conmon/runner/config -# github.com/containers/image/v5 v5.9.0 +# github.com/containers/image/v5 v5.10.1 github.com/containers/image/v5/copy github.com/containers/image/v5/directory github.com/containers/image/v5/directory/explicitfilepath @@ -120,6 +120,7 @@ github.com/containers/image/v5/docker/policyconfiguration github.com/containers/image/v5/docker/reference github.com/containers/image/v5/docker/tarfile github.com/containers/image/v5/image +github.com/containers/image/v5/internal/blobinfocache github.com/containers/image/v5/internal/iolimits github.com/containers/image/v5/internal/pkg/keyctl github.com/containers/image/v5/internal/pkg/platform @@ -348,7 +349,7 @@ github.com/json-iterator/go # github.com/juju/ansiterm v0.0.0-20180109212912-720a0952cc2a github.com/juju/ansiterm github.com/juju/ansiterm/tabwriter -# github.com/klauspost/compress v1.11.5 +# github.com/klauspost/compress v1.11.7 github.com/klauspost/compress/flate github.com/klauspost/compress/fse github.com/klauspost/compress/huff0 @@ -398,7 +399,7 @@ github.com/nxadm/tail/ratelimiter github.com/nxadm/tail/util github.com/nxadm/tail/watch github.com/nxadm/tail/winfile -# github.com/onsi/ginkgo v1.14.2 +# github.com/onsi/ginkgo v1.15.0 github.com/onsi/ginkgo github.com/onsi/ginkgo/config github.com/onsi/ginkgo/extensions/table @@ -406,6 +407,7 @@ github.com/onsi/ginkgo/ginkgo github.com/onsi/ginkgo/ginkgo/convert github.com/onsi/ginkgo/ginkgo/interrupthandler github.com/onsi/ginkgo/ginkgo/nodot +github.com/onsi/ginkgo/ginkgo/outline github.com/onsi/ginkgo/ginkgo/testrunner github.com/onsi/ginkgo/ginkgo/testsuite github.com/onsi/ginkgo/ginkgo/watch @@ -502,7 +504,7 @@ github.com/prometheus/common/model # github.com/prometheus/procfs v0.0.3 github.com/prometheus/procfs github.com/prometheus/procfs/internal/fs -# github.com/rootless-containers/rootlesskit v0.12.0 +# github.com/rootless-containers/rootlesskit v0.13.0 github.com/rootless-containers/rootlesskit/pkg/msgutil github.com/rootless-containers/rootlesskit/pkg/port github.com/rootless-containers/rootlesskit/pkg/port/builtin @@ -553,7 +555,7 @@ github.com/uber/jaeger-client-go/transport github.com/uber/jaeger-client-go/utils # github.com/uber/jaeger-lib v2.2.0+incompatible github.com/uber/jaeger-lib/metrics -# github.com/ulikunitz/xz v0.5.8 +# github.com/ulikunitz/xz v0.5.9 github.com/ulikunitz/xz github.com/ulikunitz/xz/internal/hash github.com/ulikunitz/xz/internal/xlog @@ -562,7 +564,7 @@ github.com/ulikunitz/xz/lzma github.com/vbatts/tar-split/archive/tar github.com/vbatts/tar-split/tar/asm github.com/vbatts/tar-split/tar/storage -# github.com/vbauerster/mpb/v5 v5.3.0 +# github.com/vbauerster/mpb/v5 v5.4.0 github.com/vbauerster/mpb/v5 github.com/vbauerster/mpb/v5/cwriter github.com/vbauerster/mpb/v5/decor @@ -662,6 +664,8 @@ golang.org/x/text/unicode/bidi golang.org/x/text/unicode/norm # golang.org/x/time v0.0.0-20191024005414-555d28b269f0 golang.org/x/time/rate +# golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e +golang.org/x/tools/go/ast/inspector # golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 golang.org/x/xerrors golang.org/x/xerrors/internal
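For reviewers unfamiliar with the newly vendored `golang.org/x/tools/go/ast/inspector` package (brought in alongside ginkgo v1.15.0 and its new `outline` command, per the go.mod and modules.txt changes above), the following is a minimal illustrative sketch of how that package is typically driven. It is not part of the patch; the file name, source snippet, and identifiers are invented for the example. It shows the behaviour described in the vendored inspector.go: `New` performs one full traversal and records push/pop events, after which type-filtered traversals such as `Preorder` replay the event list instead of re-walking the AST.

```go
package main

import (
	"fmt"
	"go/ast"
	"go/parser"
	"go/token"

	"golang.org/x/tools/go/ast/inspector"
)

func main() {
	// Hypothetical input, only for demonstration.
	const src = `package demo

func Describe() {}
func helper()   {}
`
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "demo.go", src, 0)
	if err != nil {
		panic(err)
	}

	// New does a single complete traversal and builds the push/pop event list.
	in := inspector.New([]*ast.File{f})

	// Preorder replays only events whose node type matches the filter
	// (here *ast.FuncDecl, passed as a typed nil exemplar), calling f for
	// each matching node in depth-first order.
	in.Preorder([]ast.Node{(*ast.FuncDecl)(nil)}, func(n ast.Node) {
		fn := n.(*ast.FuncDecl)
		fmt.Println(fn.Name.Name, fset.Position(fn.Pos()))
	})
}
```

The typed-nil exemplar is the package's idiom for type filtering: only the dynamic type of each element is inspected when building the filter mask, so a nil `*ast.FuncDecl` selects all function declarations.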