120 files changed, 2535 insertions, 729 deletions
diff --git a/.cirrus.yml b/.cirrus.yml index 39b2bdc1a..84a31e3ac 100644 --- a/.cirrus.yml +++ b/.cirrus.yml @@ -17,6 +17,7 @@ env: DEST_BRANCH: "master" # Overrides default location (/tmp/cirrus) for repo clone GOPATH: "/var/tmp/go" + GOBIN: "${GOPATH}/bin" GOSRC: "/var/tmp/go/src/github.com/containers/libpod" CIRRUS_WORKING_DIR: "/var/tmp/go/src/github.com/containers/libpod" # The default is 'sh' if unspecified @@ -33,11 +34,16 @@ env: #### #### Cache-image names to test with (double-quotes around names are critical) ### + FEDORA_NAME: "fedora-31" + PRIOR_FEDORA_NAME: "fedora-30" + UBUNTU_NAME: "ubuntu-19" + PRIOR_UBUNTU_NAME: "ubuntu-18" + _BUILT_IMAGE_SUFFIX: "libpod-5940307564953600" - FEDORA_CACHE_IMAGE_NAME: "fedora-31-${_BUILT_IMAGE_SUFFIX}" - PRIOR_FEDORA_CACHE_IMAGE_NAME: "fedora-30-${_BUILT_IMAGE_SUFFIX}" - UBUNTU_CACHE_IMAGE_NAME: "ubuntu-19-${_BUILT_IMAGE_SUFFIX}" - PRIOR_UBUNTU_CACHE_IMAGE_NAME: "ubuntu-18-${_BUILT_IMAGE_SUFFIX}" + FEDORA_CACHE_IMAGE_NAME: "${FEDORA_NAME}-${_BUILT_IMAGE_SUFFIX}" + PRIOR_FEDORA_CACHE_IMAGE_NAME: "${PRIOR_FEDORA_NAME}-${_BUILT_IMAGE_SUFFIX}" + UBUNTU_CACHE_IMAGE_NAME: "${UBUNTU_NAME}-${_BUILT_IMAGE_SUFFIX}" + PRIOR_UBUNTU_CACHE_IMAGE_NAME: "${PRIOR_UBUNTU_NAME}-${_BUILT_IMAGE_SUFFIX}" #### #### Variables for composing new cache-images (used in PR testing) from @@ -88,17 +94,18 @@ gce_instance: # quick format, lint, and unit tests on the standard platform. gating_task: - # Only run this on PRs, never during post-merge testing. This is also required - # for proper setting of EPOCH_TEST_COMMIT value, required by validation tools. + # Only run this on PRs, never during post-merge testing (for speed). only_if: $CIRRUS_BRANCH != $DEST_BRANCH env: CIRRUS_WORKING_DIR: "/usr/src/libpod" - GOPATH: "/go" - GOSRC: "/go/src/github.com/containers/libpod" + SRCPATH: "$CIRRUS_WORKING_DIR" # Runs within Cirrus's "community cluster" container: + # Note: Image has dual purpose, see contrib/gate/README.md + # The entrypoint.sh script ensures a prestine copy of $SRCPATH is + # available at $GOSRC before executing make instructions. image: "quay.io/libpod/gate:master" cpu: 4 memory: 12 @@ -136,9 +143,9 @@ gating_task: # Verify some aspects of ci/related scripts ci_script: - - '${CIRRUS_WORKING_DIR}/${SCRIPT_BASE}/lib.sh.t |& ${TIMESTAMP}' + - '${GOSRC}/${SCRIPT_BASE}/lib.sh.t |& ${TIMESTAMP}' - '/usr/local/bin/entrypoint.sh -C ${CIRRUS_WORKING_DIR}/${SCRIPT_BASE}/packer test' - - '${CIRRUS_WORKING_DIR}/${SCRIPT_BASE}/cirrus_yaml_test.py |& ${TIMESTAMP}' + - '${GOSRC}/${SCRIPT_BASE}/cirrus_yaml_test.py |& ${TIMESTAMP}' # Verify expected bash environment (-o pipefail) pipefail_enabledscript: 'if /bin/false | /bin/true; then echo "pipefail fault" && exit 72; fi' @@ -220,13 +227,14 @@ varlink_api_task: env: CIRRUS_WORKING_DIR: "/usr/src/libpod" - GOPATH: "/go" - GOSRC: "/go/src/github.com/containers/libpod" + SRCPATH: "$CIRRUS_WORKING_DIR" + EPOCH_TEST_COMMIT: "${CIRRUS_BASE_SHA}" # repo clone missing this data # Used by tree_status.sh SUGGESTION: 'remove API.md, then "make varlink_api_generate" and commit changes.' 
# Runs within Cirrus's "community cluster" container: + # Note: Image has dual purpose, see contrib/gate/README.md image: "quay.io/libpod/gate:master" cpu: 4 memory: 12 @@ -367,7 +375,7 @@ image_prune_task: # This task does the unit and integration testing for every platform testing_task: - + alias: "testing" depends_on: - "gating" - "vendor" @@ -380,21 +388,30 @@ testing_task: $CIRRUS_CHANGE_MESSAGE !=~ '.*CI:IMG.*' && $CIRRUS_CHANGE_MESSAGE !=~ '.*CI:DOCS.*' - gce_instance: - matrix: - image_name: "${FEDORA_CACHE_IMAGE_NAME}" - image_name: "${PRIOR_FEDORA_CACHE_IMAGE_NAME}" - # Multiple test failures on Ubuntu 19 - Fixes TBD in future PR - # TODO: image_name: "${UBUNTU_CACHE_IMAGE_NAME}" - image_name: "${PRIOR_UBUNTU_CACHE_IMAGE_NAME}" + matrix: + - name: "test ${FEDORA_NAME}" + gce_instance: + image_name: "${FEDORA_CACHE_IMAGE_NAME}" + - name: "test ${PRIOR_FEDORA_NAME}" + gce_instance: + image_name: "${PRIOR_FEDORA_CACHE_IMAGE_NAME}" + # Multiple test failures on Ubuntu 19 - Fixes TBD in future PR + # TODO: image_name: "${UBUNTU_CACHE_IMAGE_NAME}" + - name: "test ${PRIOR_UBUNTU_NAME}" + gce_instance: + image_name: "${PRIOR_UBUNTU_CACHE_IMAGE_NAME}" timeout_in: 120m env: ADD_SECOND_PARTITION: 'true' matrix: - TEST_REMOTE_CLIENT: 'true' - TEST_REMOTE_CLIENT: 'false' + - name: remote + env: + TEST_REMOTE_CLIENT: 'true' + - name: local + env: + TEST_REMOTE_CLIENT: 'false' networking_script: '${CIRRUS_WORKING_DIR}/${SCRIPT_BASE}/networking.sh' setup_environment_script: '$SCRIPT_BASE/setup_environment.sh |& ${TIMESTAMP}' @@ -445,8 +462,12 @@ special_testing_rootless_task: ADD_SECOND_PARTITION: 'true' SPECIALMODE: 'rootless' # See docs matrix: - TEST_REMOTE_CLIENT: 'true' - TEST_REMOTE_CLIENT: 'false' + - name: remote + env: + TEST_REMOTE_CLIENT: 'true' + - name: local + env: + TEST_REMOTE_CLIENT: 'false' timeout_in: 60m @@ -464,7 +485,7 @@ special_testing_rootless_task: special_testing_in_podman_task: - + alias: "special_testing_in_podman" depends_on: - "gating" - "varlink_api" @@ -476,12 +497,13 @@ special_testing_in_podman_task: $CIRRUS_CHANGE_MESSAGE !=~ '.*CI:IMG.*' && $CIRRUS_CHANGE_MESSAGE !=~ '.*CI:DOCS.*' - gce_instance: - matrix: - # FIXME: Integration testing currently broken for F31 hosts - # Error: container_linux.go:345: starting container process caused "process_linux.go:281: applying cgroup configuration for process caused \"mountpoint for cgroup not found\"": OCI runtime error - # image_name: "${FEDORA_CACHE_IMAGE_NAME}" - image_name: "${PRIOR_FEDORA_CACHE_IMAGE_NAME}" + matrix: + # FIXME: Integration testing currently broken for F31 hosts + # Error: container_linux.go:345: starting container process caused "process_linux.go:281: applying cgroup configuration for process caused \"mountpoint for cgroup not found\"": OCI runtime error + # image_name: "${FEDORA_CACHE_IMAGE_NAME}" + - name: "in-podman ${PRIOR_FEDORA_NAME}" + gce_instance: + image_name: "${PRIOR_FEDORA_CACHE_IMAGE_NAME}" env: ADD_SECOND_PARTITION: 'true' @@ -506,7 +528,7 @@ special_testing_in_podman_task: special_testing_cross_task: - + alias: "special_testing_cross" depends_on: - "gating" - "varlink_api" @@ -516,10 +538,13 @@ special_testing_cross_task: $CIRRUS_CHANGE_MESSAGE !=~ '.*CI:IMG.*' && $CIRRUS_CHANGE_MESSAGE !=~ '.*CI:DOCS.*' - env: - matrix: - CROSS_PLATFORM: 'windows' - CROSS_PLATFORM: 'darwin' + matrix: + - name: 'cross-platform: windows' + env: + CROSS_PLATFORM: 'windows' + - name: 'cross-platform: darwin' + env: + CROSS_PLATFORM: 'darwin' timeout_in: 20m @@ -646,15 +671,19 @@ 
verify_test_built_images_task: env: ADD_SECOND_PARTITION: 'true' matrix: - TEST_REMOTE_CLIENT: 'true' - TEST_REMOTE_CLIENT: 'false' + - name: remote + env: + TEST_REMOTE_CLIENT: 'true' + - name: local + env: + TEST_REMOTE_CLIENT: 'false' matrix: # Required env. var. by check_image_script - PACKER_BUILDER_NAME: "fedora-30" - PACKER_BUILDER_NAME: "fedora-31" - PACKER_BUILDER_NAME: "ubuntu-18" + PACKER_BUILDER_NAME: "${FEDORA_NAME}" + PACKER_BUILDER_NAME: "${PRIOR_FEDORA_NAME}" + PACKER_BUILDER_NAME: "${PRIOR_UBUNTU_NAME}" # Multiple test failures on ${UBUNTU_CACHE_IMAGE_NAME} - # PACKER_BUILDER_NAME: "ubuntu-19" + # PACKER_BUILDER_NAME: "${UBUNTU_NAME}" networking_script: '${CIRRUS_WORKING_DIR}/${SCRIPT_BASE}/networking.sh' installed_packages_script: '$SCRIPT_BASE/logcollector.sh packages' @@ -757,12 +786,13 @@ success_task: env: CIRRUS_WORKING_DIR: "/usr/src/libpod" - GOPATH: "/go" - GOSRC: "/go/src/github.com/containers/libpod" + SRCPATH: "$CIRRUS_WORKING_DIR" + EPOCH_TEST_COMMIT: "${CIRRUS_BASE_SHA}" # repo clone missing this data container: + # Note: Image has dual purpose, see contrib/gate/README.md image: "quay.io/libpod/gate:master" cpu: 1 memory: 1 - success_script: '$CIRRUS_WORKING_DIR/$SCRIPT_BASE/success.sh |& ${TIMESTAMP}' + success_script: '/usr/local/bin/entrypoint.sh ./$SCRIPT_BASE/success.sh |& ${TIMESTAMP}' diff --git a/.dockerignore b/.dockerignore new file mode 100644 index 000000000..85914ee63 --- /dev/null +++ b/.dockerignore @@ -0,0 +1,3 @@ +.git/ +.github/ +bin/ diff --git a/.golangci.yml b/.golangci.yml index dda1cc7ec..8b1062b4c 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -24,7 +24,6 @@ linters: - goconst - gocyclo - golint - - goimports - gosec - lll - maligned diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index be13b6de3..7e94957eb 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -261,24 +261,62 @@ commit automatically with `git commit -s`. All code changes must pass ``make validate`` and ``make lint``, as executed in a standard container. The container image for this -purpose is provided at: ``quay.io/libpod/gate:latest``. However, -for changes to the image itself, it may also be built locally -from the repository root, with the command: +purpose is provided at: ``quay.io/libpod/gate:master``. With +other tags available for different branches as needed. These +images are built automatically after merges to the branch. + +#### Building the gate container locally + +For local use, debugging, or experimentation, the gate image may +be built locally from the repository root, with the command: ``` -sudo podman build -t quay.io/libpod/gate:latest -f contrib/gate/Dockerfile . +podman build -t gate -f contrib/gate/Dockerfile . ``` ***N/B:*** **don't miss the dot (.) at the end, it's really important** -The container executes 'make' by default, on a copy of the repository. -This avoids changing or leaving build artifacts in your working directory. +#### Local use of gate container + +The gate container's entry-point executes 'make' by default, on a copy of +the repository made at runtime. This avoids the container changing or +leaving build artifacts in your hosts working directory. It also guarantees +every execution is based upon pristine code provided from the host. + Execution does not require any special permissions from the host. However, -the repository root must be bind-mounted into the container at -'/usr/src/libpod'. 
For example, running `make lint` is done (from -the repository root) with the command: +your libpod repository clone's root must be bind-mounted to the container at +'/usr/src/libpod'. The copy will be made into /var/tmp/go (`$GOSRC` in container) +before running your make target. For example, running `make lint` from a +repository clone at $HOME/devel/libpod could be done with the commands: + +```bash +$ cd $HOME/devel/libpod +$ podman run -it --rm -v $PWD:/usr/src/libpod:ro \ + --security-opt label=disable quay.io/libpod/gate:master \ + lint +``` + +***N/B:*** Depending on your clone's git remotes-configuration, +(esp. for `validate` and `lint` targets), you may also need to reference the +commit which was your upstream fork-point. Otherwise you may receive an error +similar to: -``sudo podman run -it --rm -v $PWD:/usr/src/libpod:ro --security-opt label=disable quay.io/libpod/gate:latest lint`` +``` +fatal: Not a valid object name master +Makefile:152: *** Required variable EPOCH_TEST_COMMIT value is undefined, whitespace, or empty. Stop. +``` + +For example, assuming your have a remote called `upstream` running the +validate target should be done like this: + +```bash +$ cd $HOME/devel/libpod +$ git remote update upstream +$ export EPOCH_TEST_COMMIT=$(git merge-base upstream/master HEAD) +$ podman run -it --rm -e EPOCH_TEST_COMMIT -v $PWD:/usr/src/libpod:ro \ + --security-opt label=disable quay.io/libpod/gate:master \ + validate +``` ### Integration Tests @@ -394,7 +394,7 @@ man-page-check: .PHONY: codespell codespell: - codespell -S bin,vendor,.git,go.sum,changelog.txt,seccomp.json,.cirrus.yml,"*.xz,*.gz,*.tar,*.tgz,bin2img,*ico,*.png,*.1,*.5,copyimg,*.orig,apidoc.go" -L uint,iff,od,seeked + codespell -S bin,vendor,.git,go.sum,changelog.txt,seccomp.json,.cirrus.yml,"*.xz,*.gz,*.tar,*.tgz,bin2img,*ico,*.png,*.1,*.5,copyimg,*.orig,apidoc.go" -L uint,iff,od,seeked,splitted,marge,ERRO,hist -w # When publishing releases include critical build-time details .PHONY: release.txt diff --git a/RELEASE_NOTES.md b/RELEASE_NOTES.md index f813b494f..513a3eeca 100644 --- a/RELEASE_NOTES.md +++ b/RELEASE_NOTES.md @@ -4,9 +4,11 @@ ### Features - Many networking-related flags have been added to `podman pod create` to enable customization of pod networks, including `--add-host`, `--dns`, `--dns-opt`, `--dns-search`, `--ip`, `--mac-address`, `--network`, and `--no-hosts` - The `podman ps --format=json` command now includes the ID of the image containers were created with +- The `podman run` and `podman create` commands now feature an `--rmi` flag to remove the image the container was using after it exits (if no other containers are using said image) ([#4628](https://github.com/containers/libpod/issues/4628)) - The `podman create` and `podman run` commands now support the `--device-cgroup-rule` flag ([#4876](https://github.com/containers/libpod/issues/4876)) - While the HTTP API remains in alpha, many fixes and additions have landed. These are documented in a separate subsection below - The `podman create` and `podman run` commands now feature a `--no-healthcheck` flag to disable healthchecks for a container ([#5299](https://github.com/containers/libpod/issues/5299)) +- Containers now recognize the `io.containers.capabilities` label, which specifies a list of capabilities required by the image to run. 
These capabilities will be used as long as they are more restrictive than the default capabilities used ### Bugfixes - Fixed CVE-2020-1726, a security issue where volumes manually populated before first being mounted into a container could have those contents overwritten on first being mounted into a container @@ -33,6 +35,8 @@ - Fixed a bug where the `--uts` flag to `podman create` and `podman run` would only allow specifying containers by full ID ([#5289](https://github.com/containers/libpod/issues/5289)) - Fixed a bug where rootless Podman could segfault when passed a large number of file descriptors - Fixed a bug where the `podman port` command was incorrectly interpreting additional arguments as container names, instead of port numbers +- Fixed a bug where units created by `podman generate systemd` did not depend on network targets, and so could start before the system network was ready ([#4130](https://github.com/containers/libpod/issues/4130)) +- Fixed a bug where exec sessions in containers which did not specify a user would not inherit supplemental groups added to the container via `--group-add` ### HTTP API - Initial support for secure connections to servers via SSH tunneling has been added @@ -47,7 +51,7 @@ - Many fixes have been made to API documentation to ensure it matches the code ### Misc -- Updated vendored Buildah to v1.14.1 +- Updated vendored Buildah to v1.14.2 - Updated vendored containers/storage to v1.16.0 - The `Created` field to `podman images --format=json` has been renamed to `CreatedSince` as part of the fix for ([#5110](https://github.com/containers/libpod/issues/5110)). Go templates using the old name should still work - The `CreatedTime` field to `podman images --format=json` has been renamed to `CreatedAt` as part of the fix for ([#5110](https://github.com/containers/libpod/issues/5110)). Go templates using the old name should still work diff --git a/changelog.txt b/changelog.txt index 84d6dcea0..9b0968265 100644 --- a/changelog.txt +++ b/changelog.txt @@ -1,3 +1,56 @@ +- Changelog for v1.8.1-rc3 (2020-03-06) + * Update release notes for v1.8.1-RC3 + * Part 2: try to clean up the long image instance names + * WIP: Try renaming long cirrus job names + * vendor: update github.com/containernetworking/cni to v0.7.2-0.20200304161608-4fae32b84921 + * Removed extraneous comments and defaults plus amended variable declaration + * Removed the unnecessary code + * Implemented size parameter on GetContainer + * Implement size parameter on ListContainers + * Map configured status to created to match docker API states + * Fix to remove null entry from end of images json + * Register handlers without version to align with docker API + * golangci: enable goimports + * generate systemd: remove leading slashes + * exec: fix error code when conmon fails + * Vendor buildah 1.14.2 + * env: don't set "container" env + * Fix podman image sign help output + * avoid adding to nil map + * Exec: use ErrorConmonRead + * exec: get the exit code from sync pipe instead of file + * generate systemd: add network dependencies + * Bump to Buildah v1.14.1 + * APIv2 tests: add tests for stop + * Add the rmi flag to podman-run to delete container image + * consolidate env handling into pkg/env + * CI: format cirrus logs + * Update docs/source/markdown/podman-build.1.md + * Allow devs to set labels in container images for default capabilities. 
+ * CI: add API v2 tests + * more swagger fixes + * Bump github.com/opencontainers/selinux from 1.3.2 to 1.3.3 + * Add validate() for containers + * Cirrus: Fix gate image & false-positive exits + * Update pod bindings and Add test to validate prune pod apiv2 binding. + * Fix wrong condition in bindings test + * Ensure that exec sessions inherit supplemental groups + * Cirrus: Update VM images + * Cirrus: Force runc use in F30 + * rework apiv2 wait endpoint|binding + * build: specify input fd to buildah + * Cirrus: Remove unnecessary handle_crun workaround + * Cirrus: Print env. vars at end of setup. + * Cirrus: Fix not growing Fedora root + * network create should use firewall plugin + * add firewall plugin (no backend) to default cni config + * binding tests for volumes + * Bump to v1.8.1-dev + * container Exists: fix URL + * CI: package_versions: include hostinfo, kernel + * Review comments + * [WIP] Add cmd flag to show container name in log + - Changelog for v1.8.1-rc2 (2020-02-27) * Update release notes for v1.8.1-rc2 * Vendor in latest containers/buildah diff --git a/cmd/podman/diff.go b/cmd/podman/diff.go index f052b510d..c15512360 100644 --- a/cmd/podman/diff.go +++ b/cmd/podman/diff.go @@ -2,6 +2,7 @@ package main import ( "fmt" + "github.com/containers/buildah/pkg/formats" "github.com/containers/libpod/cmd/podman/cliconfig" "github.com/containers/libpod/pkg/adapter" diff --git a/cmd/podman/main.go b/cmd/podman/main.go index 3320ab72f..a2acbbf53 100644 --- a/cmd/podman/main.go +++ b/cmd/podman/main.go @@ -138,6 +138,7 @@ func before(cmd *cobra.Command, args []string) error { logrus.Info("running as rootless") } setUMask() + return profileOn(cmd) } diff --git a/cmd/podman/network_list.go b/cmd/podman/network_list.go index 16edf743b..4f2380067 100644 --- a/cmd/podman/network_list.go +++ b/cmd/podman/network_list.go @@ -4,6 +4,7 @@ package main import ( "errors" + "github.com/containers/libpod/cmd/podman/cliconfig" "github.com/containers/libpod/pkg/adapter" "github.com/containers/libpod/pkg/rootless" diff --git a/cmd/podman/pod_pause.go b/cmd/podman/pod_pause.go index 320072919..24fcee6b9 100644 --- a/cmd/podman/pod_pause.go +++ b/cmd/podman/pod_pause.go @@ -2,6 +2,7 @@ package main import ( "fmt" + "github.com/containers/libpod/cmd/podman/cliconfig" "github.com/containers/libpod/pkg/adapter" "github.com/pkg/errors" diff --git a/cmd/podman/shared/create.go b/cmd/podman/shared/create.go index 08d32df18..11b2bd027 100644 --- a/cmd/podman/shared/create.go +++ b/cmd/podman/shared/create.go @@ -494,7 +494,7 @@ func ParseCreateOpts(ctx context.Context, c *GenericCLIResults, runtime *libpod. 
if data != nil { configEnv, err := envLib.ParseSlice(data.Config.Env) if err != nil { - return nil, errors.Wrap(err, "error pasing image environment variables") + return nil, errors.Wrap(err, "error passing image environment variables") } env = envLib.Join(env, configEnv) } diff --git a/cmd/podman/sign.go b/cmd/podman/sign.go index bc909b64e..8ac59b33b 100644 --- a/cmd/podman/sign.go +++ b/cmd/podman/sign.go @@ -35,8 +35,8 @@ var ( signCommand.Remote = remoteclient return signCmd(&signCommand) }, - Example: `podman sign --sign-by mykey imageID - podman sign --sign-by mykey --directory ./mykeydir imageID`, + Example: `podman image sign --sign-by mykey imageID + podman image sign --sign-by mykey --directory ./mykeydir imageID`, } ) diff --git a/contrib/cirrus/lib.sh b/contrib/cirrus/lib.sh index 1ffe554e9..5895d84f4 100644 --- a/contrib/cirrus/lib.sh +++ b/contrib/cirrus/lib.sh @@ -68,9 +68,9 @@ export FEDORA_BASE_IMAGE="fedora-cloud-base-31-1-9-1578586410" export PRIOR_FEDORA_BASE_IMAGE="fedora-cloud-base-30-1-2-1578586410" export BUILT_IMAGE_SUFFIX="${BUILT_IMAGE_SUFFIX:--$CIRRUS_REPO_NAME-${CIRRUS_BUILD_ID}}" # IN_PODMAN container image -IN_PODMAN_IMAGE="quay.io/libpod/in_podman:latest" +IN_PODMAN_IMAGE="quay.io/libpod/in_podman:$DEST_BRANCH" # Image for uploading releases -UPLDREL_IMAGE="quay.io/libpod/upldrel:latest" +UPLDREL_IMAGE="quay.io/libpod/upldrel:master" # Avoid getting stuck waiting for user input export DEBIAN_FRONTEND="noninteractive" diff --git a/contrib/cirrus/packer/libpod_base_images.yml b/contrib/cirrus/packer/libpod_base_images.yml index 21f3795f1..255723d57 100644 --- a/contrib/cirrus/packer/libpod_base_images.yml +++ b/contrib/cirrus/packer/libpod_base_images.yml @@ -12,7 +12,7 @@ variables: # Required for output from qemu builders TTYDEV: - # Ubuntu releases are mearly copied to this project for control purposes + # Ubuntu releases are merely copied to this project for control purposes UBUNTU_BASE_IMAGE: PRIOR_UBUNTU_BASE_IMAGE: diff --git a/contrib/cirrus/setup_environment.sh b/contrib/cirrus/setup_environment.sh index d2e1b8767..5001ef4dd 100755 --- a/contrib/cirrus/setup_environment.sh +++ b/contrib/cirrus/setup_environment.sh @@ -18,7 +18,7 @@ exithandler() { echo "$(basename $0) exit status: $RET" [[ "$RET" -eq "0" ]] && date +%s >> "$SETUP_MARKER_FILEPATH" show_env_vars - [ "$RET" -eq "0" ]] || warn "Non-zero exit caused by error ABOVE env. var. display." + [[ "$RET" -eq "0" ]] || warn "Non-zero exit caused by error ABOVE env. var. display." 
} trap exithandler EXIT diff --git a/contrib/gate/Dockerfile b/contrib/gate/Dockerfile index 2a904a202..54bd2cbde 100644 --- a/contrib/gate/Dockerfile +++ b/contrib/gate/Dockerfile @@ -33,31 +33,36 @@ RUN dnf -y install \ zip \ && dnf clean all -ENV GOPATH="/go" \ - PATH="/go/bin:/bin:/sbin:/usr/bin:/usr/sbin:/usr/local/bin:/usr/local/sbin" \ +ENV GOPATH="/var/tmp/go" \ + GOBIN="/var/tmp/go/bin" \ + PATH="/var/tmp/go/bin:/bin:/sbin:/usr/bin:/usr/sbin:/usr/local/bin:/usr/local/sbin" \ SRCPATH="/usr/src/libpod" \ - GOSRC="/go/src/github.com/containers/libpod" + GOSRC="/var/tmp/go/src/github.com/containers/libpod" -# Only needed for installing build-time dependencies +# Only needed for installing build-time dependencies, then will be removed COPY / $GOSRC -WORKDIR $GOSRC - # Install dependencies RUN set -x && \ + mkdir -p "$GOBIN" && \ + mkdir -p /etc/cni/net.d && \ + mkdir -p /etc/containers && \ install -D -m 755 $GOSRC/contrib/gate/entrypoint.sh /usr/local/bin/ && \ - python3 -m pip install pre-commit && \ - rm -rf "$GOSRC" + python3 -m pip install pre-commit # Install cni config -#RUN make install.cni -RUN mkdir -p /etc/cni/net.d/ COPY cni/87-podman-bridge.conflist /etc/cni/net.d/87-podman-bridge.conflist - # Make sure we have some policy for pulling images -RUN mkdir -p /etc/containers COPY test/policy.json /etc/containers/policy.json COPY test/redhat_sigstore.yaml /etc/containers/registries.d/registry.access.redhat.com.yaml +WORKDIR "$GOSRC" +RUN make install.tools && \ + cd / && \ + rm -rf "$GOSRC" && \ + mkdir -p "$GOSRC" VOLUME ["/usr/src/libpod"] +# This entrypoint will synchronize the above volume ($SRCPATH) to $GOSRC before +# executing make. This ensures the original source remains prestine and is never +# modified by any lint/validation checks. ENTRYPOINT ["/usr/local/bin/entrypoint.sh"] diff --git a/contrib/gate/README.md b/contrib/gate/README.md index 709e6035f..fe1205dc5 100644 --- a/contrib/gate/README.md +++ b/contrib/gate/README.md @@ -1,4 +1,6 @@ ![PODMAN logo](../../logo/podman-logo-source.svg) -A standard container image for `gofmt` and lint-checking the libpod -repository. The [contributors guide contains the documentation for usage.](https://github.com/containers/libpod/blob/master/CONTRIBUTING.md#go-format-and-lint) +A standard container image for lint-checking and validating changes to the libpod +repository. The +[contributors guide contains the documentation for usage.](https://github.com/containers/libpod/blob/master/CONTRIBUTING.md#go-format-and-lint). 
Note that this container image is also utilized +in automation, see the file [.cirrus.yml](.cirrus.yml) diff --git a/contrib/gate/entrypoint.sh b/contrib/gate/entrypoint.sh index 0189cf7c5..ab6528e00 100755 --- a/contrib/gate/entrypoint.sh +++ b/contrib/gate/entrypoint.sh @@ -1,15 +1,23 @@ #!/bin/bash -[[ -n "$SRCPATH" ]] || \ - ( echo "ERROR: \$SRCPATH must be non-empty" && exit 1 ) -[[ -n "$GOSRC" ]] || \ - ( echo "ERROR: \$GOSRC must be non-empty" && exit 2 ) +set -e + +die() { + echo "${2:-FATAL ERROR (but no message given!)} (gate container entrypoint)" + exit ${1:-1} +} + +[[ -n "$SRCPATH" ]] || die 1 "ERROR: \$SRCPATH must be non-empty" +[[ -n "$GOPATH" ]] || die 2 "ERROR: \$GOPATH must be non-empty" +[[ -n "$GOSRC" ]] || die 3 "ERROR: \$GOSRC must be non-empty" [[ -r "${SRCPATH}/contrib/gate/Dockerfile" ]] || \ - ( echo "ERROR: Expecting libpod repository root at $SRCPATH" && exit 3 ) + die 4 "ERROR: Expecting libpod repository root at $SRCPATH" # Working from a copy avoids needing to perturb the actual source files -mkdir -p "$GOSRC" +# if/when developers use gate container for local testing +echo "Copying $SRCPATH to $GOSRC" +mkdir -vp "$GOSRC" /usr/bin/rsync --recursive --links --quiet --safe-links \ --perms --times --delete "${SRCPATH}/" "${GOSRC}/" cd "$GOSRC" -make "$@" +exec make "$@" diff --git a/contrib/podmanimage/README.md b/contrib/podmanimage/README.md index ab55f3189..9d841cdba 100644 --- a/contrib/podmanimage/README.md +++ b/contrib/podmanimage/README.md @@ -10,10 +10,10 @@ the images live are public and can be pulled without credentials. These contain resulting containers can run safely with privileges within the container. The container images are built using the latest Fedora and then Podman is installed into them: - * quay.io/podman/stable - This image is built using the latest stable version of Podman in a Fedora based container. Built with podman/stable/Dockerfile. - * quay.io/podman/upstream - This image is built using the latest code found in this GitHub repository. When someone creates a commit and pushes it, the image is created. Due to that the image changes frequently and is not guaranteed to be stable. Built with podmanimage/upstream/Dockerfile. - * quay.io/podman/testing - This image is built using the latest version of Podman that is or was in updates testing for Fedora. At times this may be the same as the stable image. This container image will primarily be used by the development teams for verification testing when a new package is created. Built with podmanimage/testing/Dockerfile. - + * quay.io/podman/stable - This image is built using the latest stable version of Podman in a Fedora based container. Built with [podmanimage/stable/Dockerfile](stable/Dockerfile). + * quay.io/podman/upstream - This image is built using the latest code found in this GitHub repository. When someone creates a commit and pushes it, the image is created. Due to that the image changes frequently and is not guaranteed to be stable. Built with [podmanimage/upstream/Dockerfile](upstream/Dockerfile). + * quay.io/podman/testing - This image is built using the latest version of Podman that is or was in updates testing for Fedora. At times this may be the same as the stable image. This container image will primarily be used by the development teams for verification testing when a new package is created. Built with [podmanimage/testing/Dockerfile](testing/Dockerfile). + * quay.io/podman/stable:version - This image is built manually using a Fedora based container. 
An RPM is first pulled from the [Fedora Updates System](https://bodhi.fedoraproject.org/) and the image is built from there. For more details, see the Containerfile used to build it, [podmanimage/stable/manual/Containerfile](stable/manual/Containerfile). ## Sample Usage diff --git a/contrib/podmanimage/stable/manual/Containerfile b/contrib/podmanimage/stable/manual/Containerfile new file mode 100644 index 000000000..d76d6d9b4 --- /dev/null +++ b/contrib/podmanimage/stable/manual/Containerfile @@ -0,0 +1,39 @@ +# stable/manual/Containerfile +# +# Build a Podman container image from the latest +# stable version of Podman on the Fedora Updates System. +# https://bodhi.fedoraproject.org/updates/?search=podman +# This image can be used to create a secured container +# that runs safely with privileges within the container. +# This Containerfile builds version 1.7.0, the version and +# the RPM name would need to be adjusted before a run as +# appropriate. +# +# To use, first copy an rpm file from bohdi to `/root/tmp` +# and then run: +# 'podman build -f ./Containerfile -t quay.io/podman/stable:v1.7.0 .' +# +# Once complete run: +# `podman push quay.io/stable:v1.7.0 docker://quay.io/podman/stable:v1.7.0` +# +# Start Build Process using the latest Fedora +FROM fedora:latest + +# Don't include container-selinux and remove +# directories used by dnf that are just taking +# up space. +# +COPY /tmp/podman-1.7.0-3.fc30.x86_64.rpm /tmp +RUN yum -y install /tmp/podman-1.7.0-3.fc30.x86_64.rpm fuse-overlayfs --exclude container-selinux; rm -rf /var/cache /var/log/dnf* /var/log/yum.* /tmp/podman*.rpm + +# Adjust storage.conf to enable Fuse storage. +RUN sed -i -e 's|^#mount_program|mount_program|g' -e '/additionalimage.*/a "/var/lib/shared",' /etc/containers/storage.conf +RUN mkdir -p /var/lib/shared/overlay-images /var/lib/shared/overlay-layers; touch /var/lib/shared/overlay-images/images.lock; touch /var/lib/shared/overlay-layers/layers.lock + +# Adjust libpod.conf to write logging to a file +RUN sed -i 's/events_logger = "journald"/events_logger = "file"/g' /usr/share/containers/libpod.conf; mkdir -p /run/systemd/journal + +# Set up environment variables to note that this is +# not starting with usernamespace and default to +# isolate the filesystem with chroot. +ENV _BUILDAH_STARTED_IN_USERNS="" BUILDAH_ISOLATION=chroot diff --git a/contrib/podmanimage/upstream/Dockerfile b/contrib/podmanimage/upstream/Dockerfile index 7c9434fa6..847097920 100644 --- a/contrib/podmanimage/upstream/Dockerfile +++ b/contrib/podmanimage/upstream/Dockerfile @@ -19,16 +19,16 @@ ENV GOPATH=/root/podman # that are needed for building but not running Podman RUN useradd build; yum -y update; yum -y reinstall shadow-utils; yum -y install --exclude container-selinux \ --enablerepo=updates-testing \ - atomic-registries \ btrfs-progs-devel \ containernetworking-cni \ + conmon \ device-mapper-devel \ git \ glib2-devel \ glibc-devel \ glibc-static \ go \ - golang-github-cpuguy83-go-md2man \ + golang-github-cpuguy83-md2man \ gpgme-devel \ iptables \ libassuan-devel \ diff --git a/docs/source/markdown/podman-build.1.md b/docs/source/markdown/podman-build.1.md index 3f0bfc57b..e08eebc24 100644 --- a/docs/source/markdown/podman-build.1.md +++ b/docs/source/markdown/podman-build.1.md @@ -282,8 +282,8 @@ Add an image *label* (e.g. label=*value*) to the image metadata. 
Can be used mul Users can set a special LABEL **io.containers.capabilities=CAP1,CAP2,CAP3** in a Containerfile that specified the list of Linux capabilities required for the container to run properly. This label specified in a container image tells -Podman to run the container with just these capabilties. Podman launches the -container with just the specified capabilties, as long as this list of +Podman to run the container with just these capabilities. Podman launches the +container with just the specified capabilities, as long as this list of capabilities is a subset of the default list. If the specified capabilities are not in the default set, Podman will @@ -6,14 +6,14 @@ require ( github.com/BurntSushi/toml v0.3.1 github.com/buger/goterm v0.0.0-20181115115552-c206103e1f37 github.com/checkpoint-restore/go-criu v0.0.0-20190109184317-bdb7599cd87b - github.com/containernetworking/cni v0.7.2-0.20190904153231-83439463f784 + github.com/containernetworking/cni v0.7.2-0.20200304161608-4fae32b84921 github.com/containernetworking/plugins v0.8.5 - github.com/containers/buildah v1.14.1 + github.com/containers/buildah v1.14.2 github.com/containers/common v0.4.2 github.com/containers/conmon v2.0.10+incompatible github.com/containers/image/v5 v5.2.1 github.com/containers/psgo v1.4.0 - github.com/containers/storage v1.16.0 + github.com/containers/storage v1.16.1 github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f github.com/cri-o/ocicni v0.1.1-0.20190920040751-deac903fd99b github.com/cyphar/filepath-securejoin v0.2.2 @@ -66,6 +66,8 @@ github.com/containerd/typeurl v0.0.0-20180627222232-a93fcdb778cd/go.mod h1:Cm3kw github.com/containernetworking/cni v0.7.1/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY= github.com/containernetworking/cni v0.7.2-0.20190904153231-83439463f784 h1:rqUVLD8I859xRgUx/WMC3v7QAFqbLKZbs+0kqYboRJc= github.com/containernetworking/cni v0.7.2-0.20190904153231-83439463f784/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY= +github.com/containernetworking/cni v0.7.2-0.20200304161608-4fae32b84921 h1:eUMd8hlGasYcg1tBqETZtxaW3a7EIxqY7Z1g65gcKQg= +github.com/containernetworking/cni v0.7.2-0.20200304161608-4fae32b84921/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY= github.com/containernetworking/plugins v0.8.5 h1:pCvEMrFf7yzJI8+/D/7jkvE96KD52b7/Eu+jpahihy8= github.com/containernetworking/plugins v0.8.5/go.mod h1:UZ2539umj8djuRQmBxuazHeJbYrLV8BSBejkk+she6o= github.com/containers/buildah v1.13.1 h1:EdhllQxXmOZ56mGFf68AkrpIj9XtEkkGq0WaPWFuGM0= @@ -80,6 +82,8 @@ github.com/containers/buildah v1.14.1-0.20200227103754-f0c3fd7c3d34 h1:SaK9ADT5J github.com/containers/buildah v1.14.1-0.20200227103754-f0c3fd7c3d34/go.mod h1:sdMVVcCTvvAj9o9dk/j6EnNJJadjxqjcI4Yy9WoWxSg= github.com/containers/buildah v1.14.1 h1:H0uubyWJN98xRFmwzJeJDb5NIypx+sPcJu5kCzO6hGs= github.com/containers/buildah v1.14.1/go.mod h1:sdMVVcCTvvAj9o9dk/j6EnNJJadjxqjcI4Yy9WoWxSg= +github.com/containers/buildah v1.14.2 h1:rzrOVqWL3C3xA3MBmkDgWntRsBgkI3FGKODluBO+svU= +github.com/containers/buildah v1.14.2/go.mod h1:HZ6MuZfHYq6ZMeoV9o3k9GwoCk1p3RWZOYbBXZtR7wE= github.com/containers/common v0.0.7 h1:eKYZLKfJ2d/RNDgecLDFv45cHb4imYzIcrQHx1Y029M= github.com/containers/common v0.0.7/go.mod h1:lhWV3MLhO1+KGE2x6v9+K38MxpjXGso+edmpkFnCOqI= github.com/containers/common v0.3.0 h1:9ysL/OfPcMls1Ac3jzFA4XZJVSD/JG7Dst3uQSwQtwA= @@ -100,6 +104,8 @@ github.com/containers/psgo v1.4.0/go.mod h1:ENXXLQ5E1At4K0EUsGogXBJi/C28gwqkONWe github.com/containers/storage v1.15.8/go.mod h1:zhvjIIl/fR6wt/lgqQAC+xanHQ+8gUQ0GBVeXYN81qI= 
github.com/containers/storage v1.16.0 h1:sD+s7BmiNBh61CuHN3j8PXGCwMtV9zPVJETAlshIf3w= github.com/containers/storage v1.16.0/go.mod h1:nqN09JSi1/RSI1UAUwDYXPRiGSlq5FPbNkN/xb0TfG0= +github.com/containers/storage v1.16.1 h1:gVLVqbqaoyopLJbcQ9PQdsnm8SzVy6Vw24fofwMgkE0= +github.com/containers/storage v1.16.1/go.mod h1:toFp72SLn/iyJ6YbrnrZ0bW63aH2Qw3dA8JVwL4ADPo= github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= github.com/coreos/go-iptables v0.4.5 h1:DpHb9vJrZQEFMcVLFKAAGMUVX0XoRC0ptCthinRYm38= @@ -270,6 +276,8 @@ github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+o github.com/klauspost/compress v1.9.8/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= github.com/klauspost/compress v1.10.0 h1:92XGj1AcYzA6UrVdd4qIIBrT8OroryvRvdmg/IfmC7Y= github.com/klauspost/compress v1.10.0/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= +github.com/klauspost/compress v1.10.2 h1:Znfn6hXZAHaLPNnlqUYRrBSReFHYybslgv4PTiyz6P0= +github.com/klauspost/compress v1.10.2/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= github.com/klauspost/cpuid v1.2.1/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= github.com/klauspost/pgzip v1.2.1 h1:oIPZROsWuPHpOdMVWLuJZXwgjhrW8r1yEX8UqMyeNHM= github.com/klauspost/pgzip v1.2.1/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs= diff --git a/hack/get_release_info.sh b/hack/get_release_info.sh index c2be6a270..c1c694a44 100755 --- a/hack/get_release_info.sh +++ b/hack/get_release_info.sh @@ -6,8 +6,7 @@ set -euo pipefail -DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" -cd "${GOSRC:-${DIR}/../}" +cd "${GOSRC:-$(dirname $0)/../}" valid_args() { REGEX='^\s+[[:upper:]]+\*[)]' diff --git a/hack/install_golangci.sh b/hack/install_golangci.sh index 430685a71..6ef8ce823 100755 --- a/hack/install_golangci.sh +++ b/hack/install_golangci.sh @@ -1,17 +1,17 @@ #!/bin/bash -if [ -z "$VERSION" ]; then - echo \$VERSION is empty - exit 1 -fi +set -e -if [ -z "$GOBIN" ]; then - echo \$GOBIN is empty - exit 1 -fi +die() { echo "${1:-No error message given} (from $(basename $0))"; exit 1; } + +[ -n "$VERSION" ] || die "\$VERSION is empty or undefined" +[ -n "$GOBIN" ] || die "\$GOBIN is empty or undefined" -$GOBIN/golangci-lint --version | grep $VERSION -if [ $? -ne 0 ]; then - set -e - curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sh -s -- -b $GOBIN v$VERSION +BIN="$GOBIN/golangci-lint" +if [ ! -x "$BIN" ]; then + echo "Installing golangci-lint v$VERSION into $GOBIN" + curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sh -s -- -b $GOBIN v$VERSION +else + # Prints it's own file name as part of --verison output + echo "Using existing $(dirname $BIN)/$($BIN --version)" fi diff --git a/libpod/config/config.go b/libpod/config/config.go index c72a0efc7..5d59f1bf2 100644 --- a/libpod/config/config.go +++ b/libpod/config/config.go @@ -437,7 +437,7 @@ func probeConmon(conmonBinary string) error { // with cgroupsv2. Other OCI runtimes are not yet supporting cgroupsv2. This // might change in the future. func NewConfig(userConfigPath string) (*Config, error) { - // Start with the default config and interatively merge fields in the system + // Start with the default config and iteratively merge fields in the system // configs. 
config, err := defaultConfigFromMemory() if err != nil { diff --git a/libpod/container_api.go b/libpod/container_api.go index 356da12d0..5e8fcea47 100644 --- a/libpod/container_api.go +++ b/libpod/container_api.go @@ -340,6 +340,12 @@ func (c *Container) Exec(tty, privileged bool, env map[string]string, cmd []stri if lastErr != nil { logrus.Errorf(lastErr.Error()) } + // ErrorConmonRead is a bogus value set by podman to indicate reading a value from + // conmon failed. Since it is specifically not a valid exit code, we should set + // a generic error here + if exitCodeData.data == define.ErrorConmonRead { + exitCodeData.data = define.ExecErrorCodeGeneric + } lastErr = errors.Wrapf(define.ErrOCIRuntime, "non zero exit code: %d", exitCodeData.data) } @@ -406,7 +412,7 @@ func (c *Container) Attach(streams *AttachStreams, keys string, resize <-chan re // HTTPAttach forwards an attach session over a hijacked HTTP session. // HTTPAttach will consume and close the included httpCon, which is expected to // be sourced from a hijacked HTTP connection. -// The cancel channel is optional, and can be used to asyncronously cancel the +// The cancel channel is optional, and can be used to asynchronously cancel the // attach session. // The streams variable is only supported if the container was not a terminal, // and allows specifying which of the container's standard streams will be diff --git a/libpod/container_inspect.go b/libpod/container_inspect.go index a543a19c0..50ae72499 100644 --- a/libpod/container_inspect.go +++ b/libpod/container_inspect.go @@ -670,8 +670,8 @@ type InspectAdditionalNetwork struct { // DriverOpts is presently unused and maintained exclusively for // compatibility. DriverOpts map[string]string `json:"DriverOpts"` - // IPAMConfig is presently unused and maintained exlusively for - // compabitility. + // IPAMConfig is presently unused and maintained exclusively for + // compatibility. IPAMConfig map[string]string `json:"IPAMConfig"` // Links is presently unused and maintained exclusively for // compatibility. diff --git a/libpod/container_internal.go b/libpod/container_internal.go index 67e02cc31..60b13f125 100644 --- a/libpod/container_internal.go +++ b/libpod/container_internal.go @@ -929,7 +929,7 @@ func (c *Container) completeNetworkSetup() error { return err } for _, line := range strings.Split(string(b), "\n") { - // only keep things that dont start with nameserver from the old + // only keep things that don't start with nameserver from the old // resolv.conf file if !strings.HasPrefix(line, "nameserver") { outResolvConf = append([]string{line}, outResolvConf...) 
diff --git a/libpod/image/filters.go b/libpod/image/filters.go index c54ca6333..8ca3526a0 100644 --- a/libpod/image/filters.go +++ b/libpod/image/filters.go @@ -3,13 +3,13 @@ package image import ( "context" "fmt" - "github.com/pkg/errors" "path/filepath" "strconv" "strings" "time" "github.com/containers/libpod/pkg/inspect" + "github.com/pkg/errors" "github.com/sirupsen/logrus" ) diff --git a/libpod/networking_linux.go b/libpod/networking_linux.go index d57b1a8eb..5a27a2abb 100644 --- a/libpod/networking_linux.go +++ b/libpod/networking_linux.go @@ -117,10 +117,10 @@ func (r *Runtime) configureNetNS(ctr *Container, ctrNS ns.NetNS) ([]*cnitypes.Re networkStatus := make([]*cnitypes.Result, 0) for idx, r := range results { - logrus.Debugf("[%d] CNI result: %v", idx, r.Result.String()) + logrus.Debugf("[%d] CNI result: %v", idx, r.Result) resultCurrent, err := cnitypes.GetResult(r.Result) if err != nil { - return nil, errors.Wrapf(err, "error parsing CNI plugin result %q: %v", r.Result.String(), err) + return nil, errors.Wrapf(err, "error parsing CNI plugin result %q: %v", r.Result, err) } networkStatus = append(networkStatus, resultCurrent) } diff --git a/libpod/oci.go b/libpod/oci.go index e5f9b2135..41d420664 100644 --- a/libpod/oci.go +++ b/libpod/oci.go @@ -55,7 +55,7 @@ type OCIRuntime interface { // to output; otherwise, STDOUT and STDERR will be multiplexed, with // a header prepended as follows: 1-byte STREAM (0, 1, 2 for STDIN, // STDOUT, STDERR), 3 null (0x00) bytes, 4-byte big endian length. - // If a cancel channel is provided, it can be used to asyncronously + // If a cancel channel is provided, it can be used to asynchronously // termninate the attach session. Detach keys, if given, will also cause // the attach session to be terminated if provided via the STDIN // channel. 
If they are not provided, the default detach keys will be diff --git a/libpod/runtime_ctr.go b/libpod/runtime_ctr.go index de93fdce7..ba2a6b93e 100644 --- a/libpod/runtime_ctr.go +++ b/libpod/runtime_ctr.go @@ -10,6 +10,7 @@ import ( "github.com/containers/libpod/libpod/define" "github.com/containers/libpod/libpod/events" + "github.com/containers/libpod/pkg/cgroups" "github.com/containers/libpod/pkg/rootless" "github.com/containers/storage/pkg/stringid" spec "github.com/opencontainers/runtime-spec/specs-go" @@ -438,9 +439,16 @@ func (r *Runtime) removeContainer(ctx context.Context, c *Container, force bool, if err := c.ociRuntime.KillContainer(c, 9, false); err != nil { return err } - if err := c.unpause(); err != nil { + isV2, err := cgroups.IsCgroup2UnifiedMode() + if err != nil { return err } + // cgroups v1 and v2 handle signals on paused processes differently + if !isV2 { + if err := c.unpause(); err != nil { + return err + } + } // Need to update container state to make sure we know it's stopped if err := c.waitForExitFileAndSync(); err != nil { return err diff --git a/libpod/runtime_img.go b/libpod/runtime_img.go index bae1c1ed8..6c45a2300 100644 --- a/libpod/runtime_img.go +++ b/libpod/runtime_img.go @@ -21,7 +21,7 @@ import ( "github.com/containers/image/v5/directory" dockerarchive "github.com/containers/image/v5/docker/archive" ociarchive "github.com/containers/image/v5/oci/archive" - "github.com/opencontainers/image-spec/specs-go/v1" + v1 "github.com/opencontainers/image-spec/specs-go/v1" ) // Runtime API diff --git a/libpod/runtime_pod_infra_linux.go b/libpod/runtime_pod_infra_linux.go index da46f03e8..27735a9b2 100644 --- a/libpod/runtime_pod_infra_linux.go +++ b/libpod/runtime_pod_infra_linux.go @@ -10,7 +10,7 @@ import ( "github.com/containers/libpod/libpod/image" "github.com/containers/libpod/pkg/rootless" "github.com/containers/libpod/pkg/util" - "github.com/opencontainers/image-spec/specs-go/v1" + v1 "github.com/opencontainers/image-spec/specs-go/v1" spec "github.com/opencontainers/runtime-spec/specs-go" "github.com/opencontainers/runtime-tools/generate" "github.com/pkg/errors" diff --git a/libpod/storage.go b/libpod/storage.go index 6375d031b..d675f4ffe 100644 --- a/libpod/storage.go +++ b/libpod/storage.go @@ -8,7 +8,7 @@ import ( "github.com/containers/image/v5/types" "github.com/containers/libpod/libpod/define" "github.com/containers/storage" - "github.com/opencontainers/image-spec/specs-go/v1" + v1 "github.com/opencontainers/image-spec/specs-go/v1" "github.com/opentracing/opentracing-go" "github.com/pkg/errors" "github.com/sirupsen/logrus" diff --git a/pkg/api/handlers/containers.go b/pkg/api/handlers/containers.go index ee080e794..1256256fd 100644 --- a/pkg/api/handlers/containers.go +++ b/pkg/api/handlers/containers.go @@ -2,12 +2,12 @@ package handlers import ( "fmt" - "github.com/docker/docker/api/types" "net/http" "github.com/containers/libpod/libpod" "github.com/containers/libpod/libpod/define" "github.com/containers/libpod/pkg/api/handlers/utils" + "github.com/docker/docker/api/types" "github.com/gorilla/schema" "github.com/pkg/errors" ) @@ -72,7 +72,6 @@ func UnpauseContainer(w http.ResponseWriter, r *http.Request) { return } - // the api does not error if the Container is already paused, so just into it if err := con.Unpause(); err != nil { utils.InternalServerError(w, err) return diff --git a/pkg/api/handlers/generic/containers.go b/pkg/api/handlers/generic/containers.go index ab587ded4..b8460702c 100644 --- a/pkg/api/handlers/generic/containers.go +++ 
b/pkg/api/handlers/generic/containers.go @@ -57,6 +57,7 @@ func ListContainers(w http.ResponseWriter, r *http.Request) { }{ // override any golang type defaults } + if err := decoder.Decode(&query, r.URL.Query()); err != nil { utils.Error(w, "Something went wrong.", http.StatusBadRequest, errors.Wrapf(err, "Failed to parse parameters for %s", r.URL.String())) return @@ -85,7 +86,7 @@ func ListContainers(w http.ResponseWriter, r *http.Request) { var list = make([]*handlers.Container, len(containers)) for i, ctnr := range containers { - api, err := handlers.LibpodToContainer(ctnr, infoData) + api, err := handlers.LibpodToContainer(ctnr, infoData, query.Size) if err != nil { utils.InternalServerError(w, err) return @@ -97,6 +98,17 @@ func ListContainers(w http.ResponseWriter, r *http.Request) { func GetContainer(w http.ResponseWriter, r *http.Request) { runtime := r.Context().Value("runtime").(*libpod.Runtime) + decoder := r.Context().Value("decoder").(*schema.Decoder) + query := struct { + Size bool `schema:"size"` + }{ + // override any golang type defaults + } + + if err := decoder.Decode(&query, r.URL.Query()); err != nil { + utils.Error(w, "Something went wrong.", http.StatusBadRequest, errors.Wrapf(err, "Failed to parse parameters for %s", r.URL.String())) + return + } name := utils.GetName(r) ctnr, err := runtime.LookupContainer(name) @@ -104,7 +116,7 @@ func GetContainer(w http.ResponseWriter, r *http.Request) { utils.ContainerNotFound(w, name, err) return } - api, err := handlers.LibpodToContainerJSON(ctnr) + api, err := handlers.LibpodToContainerJSON(ctnr, query.Size) if err != nil { utils.InternalServerError(w, err) return diff --git a/pkg/api/handlers/generic/images.go b/pkg/api/handlers/generic/images.go index 1ced499d9..078896834 100644 --- a/pkg/api/handlers/generic/images.go +++ b/pkg/api/handlers/generic/images.go @@ -90,7 +90,7 @@ func PruneImages(w http.ResponseWriter, r *http.Request) { }) } - //FIXME/TODO to do this exacty correct, pruneimages needs to return idrs and space-reclaimed, then we are golden + //FIXME/TODO to do this exactly correct, pruneimages needs to return idrs and space-reclaimed, then we are golden ipr := types.ImagesPruneReport{ ImagesDeleted: idr, SpaceReclaimed: 1, // TODO we cannot supply this right now @@ -305,7 +305,7 @@ func GetImages(w http.ResponseWriter, r *http.Request) { utils.Error(w, "Something went wrong.", http.StatusInternalServerError, errors.Wrap(err, "Failed get images")) return } - var summaries = make([]*handlers.ImageSummary, len(images)+1) + var summaries = make([]*handlers.ImageSummary, len(images)) for j, img := range images { is, err := handlers.ImageToImageSummary(img) if err != nil { diff --git a/pkg/api/handlers/libpod/pods.go b/pkg/api/handlers/libpod/pods.go index e8dc5bde2..ee697b6b7 100644 --- a/pkg/api/handlers/libpod/pods.go +++ b/pkg/api/handlers/libpod/pods.go @@ -181,7 +181,7 @@ func PodStop(w http.ResponseWriter, r *http.Request) { } // TODO we need to implement a pod.State/Status in libpod internal so libpod api - // users dont have to run through all containers. + // users don't have to run through all containers. podContainers, err := pod.AllContainers() if err != nil { utils.Error(w, "Something went wrong", http.StatusInternalServerError, err) @@ -227,7 +227,7 @@ func PodStart(w http.ResponseWriter, r *http.Request) { } // TODO we need to implement a pod.State/Status in libpod internal so libpod api - // users dont have to run through all containers. + // users don't have to run through all containers. 
podContainers, err := pod.AllContainers() if err != nil { utils.Error(w, "Something went wrong", http.StatusInternalServerError, err) diff --git a/pkg/api/handlers/types.go b/pkg/api/handlers/types.go index 2930a9567..2e429dc58 100644 --- a/pkg/api/handlers/types.go +++ b/pkg/api/handlers/types.go @@ -347,7 +347,7 @@ func ImageDataToImageInspect(ctx context.Context, l *libpodImage.Image) (*ImageI } -func LibpodToContainer(l *libpod.Container, infoData []define.InfoData) (*Container, error) { +func LibpodToContainer(l *libpod.Container, infoData []define.InfoData, sz bool) (*Container, error) { imageId, imageName := l.Image() var ( @@ -360,11 +360,18 @@ func LibpodToContainer(l *libpod.Container, infoData []define.InfoData) (*Contai if state, err = l.State(); err != nil { return nil, err } - if sizeRW, err = l.RWSize(); err != nil { - return nil, err + stateStr := state.String() + if stateStr == "configured" { + stateStr = "created" } - if sizeRootFs, err = l.RootFsSize(); err != nil { - return nil, err + + if sz { + if sizeRW, err = l.RWSize(); err != nil { + return nil, err + } + if sizeRootFs, err = l.RootFsSize(); err != nil { + return nil, err + } } return &Container{docker.Container{ @@ -378,7 +385,7 @@ func LibpodToContainer(l *libpod.Container, infoData []define.InfoData) (*Contai SizeRw: sizeRW, SizeRootFs: sizeRootFs, Labels: l.Labels(), - State: string(state), + State: stateStr, Status: "", HostConfig: struct { NetworkMode string `json:",omitempty"` @@ -391,9 +398,9 @@ func LibpodToContainer(l *libpod.Container, infoData []define.InfoData) (*Contai }, nil } -func LibpodToContainerJSON(l *libpod.Container) (*docker.ContainerJSON, error) { +func LibpodToContainerJSON(l *libpod.Container, sz bool) (*docker.ContainerJSON, error) { _, imageName := l.Image() - inspect, err := l.Inspect(true) + inspect, err := l.Inspect(sz) if err != nil { return nil, err } diff --git a/pkg/api/handlers/utils/handler.go b/pkg/api/handlers/utils/handler.go index 44bcc794c..32b8c5b0a 100644 --- a/pkg/api/handlers/utils/handler.go +++ b/pkg/api/handlers/utils/handler.go @@ -3,7 +3,6 @@ package utils import ( "encoding/json" "fmt" - "github.com/pkg/errors" "io" "net/http" "net/url" @@ -11,6 +10,7 @@ import ( "strings" "github.com/gorilla/mux" + "github.com/pkg/errors" "github.com/sirupsen/logrus" ) diff --git a/pkg/api/server/register_auth.go b/pkg/api/server/register_auth.go index 8db131153..7e51c2b63 100644 --- a/pkg/api/server/register_auth.go +++ b/pkg/api/server/register_auth.go @@ -7,5 +7,7 @@ import ( func (s *APIServer) registerAuthHandlers(r *mux.Router) error { r.Handle(VersionedPath("/auth"), s.APIHandler(handlers.UnsupportedHandler)) + // Added non version path to URI to support docker non versioned paths + r.Handle("/auth", s.APIHandler(handlers.UnsupportedHandler)) return nil } diff --git a/pkg/api/server/register_containers.go b/pkg/api/server/register_containers.go index 6aad7ff88..a87e8eaee 100644 --- a/pkg/api/server/register_containers.go +++ b/pkg/api/server/register_containers.go @@ -34,6 +34,8 @@ func (s *APIServer) registerContainersHandlers(r *mux.Router) error { // 500: // $ref: "#/responses/InternalError" r.HandleFunc(VersionedPath("/containers/create"), s.APIHandler(generic.CreateContainer)).Methods(http.MethodPost) + // Added non version path to URI to support docker non versioned paths + r.HandleFunc("/containers/create", s.APIHandler(generic.CreateContainer)).Methods(http.MethodPost) // swagger:operation GET /containers/json compat listContainers // --- // tags: @@ -84,6 +86,8 
@@ func (s *APIServer) registerContainersHandlers(r *mux.Router) error { // 500: // $ref: "#/responses/InternalError" r.HandleFunc(VersionedPath("/containers/json"), s.APIHandler(generic.ListContainers)).Methods(http.MethodGet) + // Added non version path to URI to support docker non versioned paths + r.HandleFunc("/containers/json", s.APIHandler(generic.ListContainers)).Methods(http.MethodGet) // swagger:operation POST /containers/prune compat pruneContainers // --- // tags: @@ -106,6 +110,8 @@ func (s *APIServer) registerContainersHandlers(r *mux.Router) error { // 500: // $ref: "#/responses/InternalError" r.HandleFunc(VersionedPath("/containers/prune"), s.APIHandler(handlers.PruneContainers)).Methods(http.MethodPost) + // Added non version path to URI to support docker non versioned paths + r.HandleFunc("/containers/prune", s.APIHandler(handlers.PruneContainers)).Methods(http.MethodPost) // swagger:operation DELETE /containers/{name} compat removeContainer // --- // tags: @@ -145,6 +151,8 @@ func (s *APIServer) registerContainersHandlers(r *mux.Router) error { // 500: // $ref: "#/responses/InternalError" r.HandleFunc(VersionedPath("/containers/{name}"), s.APIHandler(generic.RemoveContainer)).Methods(http.MethodDelete) + // Added non version path to URI to support docker non versioned paths + r.HandleFunc("/containers/{name}", s.APIHandler(generic.RemoveContainer)).Methods(http.MethodDelete) // swagger:operation GET /containers/{name}/json compat getContainer // --- // tags: @@ -172,6 +180,8 @@ func (s *APIServer) registerContainersHandlers(r *mux.Router) error { // 500: // $ref: "#/responses/InternalError" r.HandleFunc(VersionedPath("/containers/{name}/json"), s.APIHandler(generic.GetContainer)).Methods(http.MethodGet) + // Added non version path to URI to support docker non versioned paths + r.HandleFunc("/containers/{name}/json", s.APIHandler(generic.GetContainer)).Methods(http.MethodGet) // swagger:operation POST /containers/{name}/kill compat killContainer // --- // tags: @@ -202,6 +212,8 @@ func (s *APIServer) registerContainersHandlers(r *mux.Router) error { // 500: // $ref: "#/responses/InternalError" r.HandleFunc(VersionedPath("/containers/{name}/kill"), s.APIHandler(generic.KillContainer)).Methods(http.MethodPost) + // Added non version path to URI to support docker non versioned paths + r.HandleFunc("/containers/{name}/kill", s.APIHandler(generic.KillContainer)).Methods(http.MethodPost) // swagger:operation GET /containers/{name}/logs compat logsFromContainer // --- // tags: @@ -254,6 +266,8 @@ func (s *APIServer) registerContainersHandlers(r *mux.Router) error { // 500: // $ref: "#/responses/InternalError" r.HandleFunc(VersionedPath("/containers/{name}/logs"), s.APIHandler(generic.LogsFromContainer)).Methods(http.MethodGet) + // Added non version path to URI to support docker non versioned paths + r.HandleFunc("/containers/{name}/logs", s.APIHandler(generic.LogsFromContainer)).Methods(http.MethodGet) // swagger:operation POST /containers/{name}/pause compat pauseContainer // --- // tags: @@ -276,7 +290,11 @@ func (s *APIServer) registerContainersHandlers(r *mux.Router) error { // 500: // $ref: "#/responses/InternalError" r.HandleFunc(VersionedPath("/containers/{name}/pause"), s.APIHandler(handlers.PauseContainer)).Methods(http.MethodPost) + // Added non version path to URI to support docker non versioned paths + r.HandleFunc("/containers/{name}/pause", s.APIHandler(handlers.PauseContainer)).Methods(http.MethodPost) r.HandleFunc(VersionedPath("/containers/{name}/rename"), 
s.APIHandler(handlers.UnsupportedHandler)).Methods(http.MethodPost) + // Added non version path to URI to support docker non versioned paths + r.HandleFunc("/containers/{name}/rename", s.APIHandler(handlers.UnsupportedHandler)).Methods(http.MethodPost) // swagger:operation POST /containers/{name}/restart compat restartContainer // --- // tags: @@ -302,6 +320,8 @@ func (s *APIServer) registerContainersHandlers(r *mux.Router) error { // 500: // $ref: "#/responses/InternalError" r.HandleFunc(VersionedPath("/containers/{name}/restart"), s.APIHandler(handlers.RestartContainer)).Methods(http.MethodPost) + // Added non version path to URI to support docker non versioned paths + r.HandleFunc("/containers/{name}/restart", s.APIHandler(handlers.RestartContainer)).Methods(http.MethodPost) // swagger:operation POST /containers/{name}/start compat startContainer // --- // tags: @@ -330,6 +350,8 @@ func (s *APIServer) registerContainersHandlers(r *mux.Router) error { // 500: // $ref: "#/responses/InternalError" r.HandleFunc(VersionedPath("/containers/{name}/start"), s.APIHandler(handlers.StartContainer)).Methods(http.MethodPost) + // Added non version path to URI to support docker non versioned paths + r.HandleFunc("/containers/{name}/start", s.APIHandler(handlers.StartContainer)).Methods(http.MethodPost) // swagger:operation GET /containers/{name}/stats compat statsContainer // --- // tags: @@ -357,6 +379,8 @@ func (s *APIServer) registerContainersHandlers(r *mux.Router) error { // 500: // $ref: "#/responses/InternalError" r.HandleFunc(VersionedPath("/containers/{name}/stats"), s.APIHandler(generic.StatsContainer)).Methods(http.MethodGet) + // Added non version path to URI to support docker non versioned paths + r.HandleFunc("/containers/{name}/stats", s.APIHandler(generic.StatsContainer)).Methods(http.MethodGet) // swagger:operation POST /containers/{name}/stop compat stopContainer // --- // tags: @@ -385,6 +409,8 @@ func (s *APIServer) registerContainersHandlers(r *mux.Router) error { // 500: // $ref: "#/responses/InternalError" r.HandleFunc(VersionedPath("/containers/{name}/stop"), s.APIHandler(handlers.StopContainer)).Methods(http.MethodPost) + // Added non version path to URI to support docker non versioned paths + r.HandleFunc("/containers/{name}/stop", s.APIHandler(handlers.StopContainer)).Methods(http.MethodPost) // swagger:operation GET /containers/{name}/top compat topContainer // --- // tags: @@ -410,6 +436,8 @@ func (s *APIServer) registerContainersHandlers(r *mux.Router) error { // 500: // $ref: "#/responses/InternalError" r.HandleFunc(VersionedPath("/containers/{name}/top"), s.APIHandler(handlers.TopContainer)).Methods(http.MethodGet) + // Added non version path to URI to support docker non versioned paths + r.HandleFunc("/containers/{name}/top", s.APIHandler(handlers.TopContainer)).Methods(http.MethodGet) // swagger:operation POST /containers/{name}/unpause compat unpauseContainer // --- // tags: @@ -432,6 +460,8 @@ func (s *APIServer) registerContainersHandlers(r *mux.Router) error { // 500: // $ref: "#/responses/InternalError" r.HandleFunc(VersionedPath("/containers/{name}/unpause"), s.APIHandler(handlers.UnpauseContainer)).Methods(http.MethodPost) + // Added non version path to URI to support docker non versioned paths + r.HandleFunc("/containers/{name}/unpause", s.APIHandler(handlers.UnpauseContainer)).Methods(http.MethodPost) // swagger:operation POST /containers/{name}/wait compat waitContainer // --- // tags: @@ -465,6 +495,8 @@ func (s *APIServer) registerContainersHandlers(r 
*mux.Router) error { // 500: // $ref: "#/responses/InternalError" r.HandleFunc(VersionedPath("/containers/{name}/wait"), s.APIHandler(generic.WaitContainer)).Methods(http.MethodPost) + // Added non version path to URI to support docker non versioned paths + r.HandleFunc("/containers/{name}/wait", s.APIHandler(generic.WaitContainer)).Methods(http.MethodPost) // swagger:operation POST /containers/{name}/attach compat attachContainer // --- // tags: @@ -520,6 +552,8 @@ func (s *APIServer) registerContainersHandlers(r *mux.Router) error { // 500: // $ref: "#/responses/InternalError" r.HandleFunc(VersionedPath("/containers/{name}/attach"), s.APIHandler(handlers.AttachContainer)).Methods(http.MethodPost) + // Added non version path to URI to support docker non versioned paths + r.HandleFunc("/containers/{name}/attach", s.APIHandler(handlers.AttachContainer)).Methods(http.MethodPost) // swagger:operation POST /containers/{name}/resize compat resizeContainer // --- // tags: @@ -552,6 +586,8 @@ func (s *APIServer) registerContainersHandlers(r *mux.Router) error { // 500: // $ref: "#/responses/InternalError" r.HandleFunc(VersionedPath("/containers/{name}/resize"), s.APIHandler(handlers.ResizeContainer)).Methods(http.MethodPost) + // Added non version path to URI to support docker non versioned paths + r.HandleFunc("/containers/{name}/resize", s.APIHandler(handlers.ResizeContainer)).Methods(http.MethodPost) /* libpod endpoints diff --git a/pkg/api/server/register_distribution.go b/pkg/api/server/register_distribution.go index f03662224..730129d5d 100644 --- a/pkg/api/server/register_distribution.go +++ b/pkg/api/server/register_distribution.go @@ -7,5 +7,7 @@ import ( func (s *APIServer) registerDistributionHandlers(r *mux.Router) error { r.HandleFunc(VersionedPath("/distribution/{name}/json"), handlers.UnsupportedHandler) + // Added non version path to URI to support docker non versioned paths + r.HandleFunc("/distribution/{name}/json", handlers.UnsupportedHandler) return nil } diff --git a/pkg/api/server/register_events.go b/pkg/api/server/register_events.go index bc3b62662..ea5d21882 100644 --- a/pkg/api/server/register_events.go +++ b/pkg/api/server/register_events.go @@ -35,5 +35,7 @@ func (s *APIServer) registerEventsHandlers(r *mux.Router) error { // 500: // "$ref": "#/responses/InternalError" r.Handle(VersionedPath("/events"), s.APIHandler(handlers.GetEvents)).Methods(http.MethodGet) + // Added non version path to URI to support docker non versioned paths + r.Handle("/events", s.APIHandler(handlers.GetEvents)).Methods(http.MethodGet) return nil } diff --git a/pkg/api/server/register_exec.go b/pkg/api/server/register_exec.go index ad62de3f5..76033a9ca 100644 --- a/pkg/api/server/register_exec.go +++ b/pkg/api/server/register_exec.go @@ -75,6 +75,8 @@ func (s *APIServer) registerExecHandlers(r *mux.Router) error { // 500: // $ref: "#/responses/InternalError" r.Handle(VersionedPath("/containers/{name}/create"), s.APIHandler(handlers.CreateExec)).Methods(http.MethodPost) + // Added non version path to URI to support docker non versioned paths + r.Handle("/containers/{name}/create", s.APIHandler(handlers.CreateExec)).Methods(http.MethodPost) // swagger:operation POST /exec/{id}/start compat startExec // --- // tags: @@ -111,6 +113,8 @@ func (s *APIServer) registerExecHandlers(r *mux.Router) error { // 500: // $ref: "#/responses/InternalError" r.Handle(VersionedPath("/exec/{id}/start"), s.APIHandler(handlers.StartExec)).Methods(http.MethodPost) + // Added non version path to URI to support docker 
non versioned paths + r.Handle("/exec/{id}/start", s.APIHandler(handlers.StartExec)).Methods(http.MethodPost) // swagger:operation POST /exec/{id}/resize compat resizeExec // --- // tags: @@ -142,6 +146,8 @@ func (s *APIServer) registerExecHandlers(r *mux.Router) error { // 500: // $ref: "#/responses/InternalError" r.Handle(VersionedPath("/exec/{id}/resize"), s.APIHandler(handlers.ResizeExec)).Methods(http.MethodPost) + // Added non version path to URI to support docker non versioned paths + r.Handle("/exec/{id}/resize", s.APIHandler(handlers.ResizeExec)).Methods(http.MethodPost) // swagger:operation GET /exec/{id}/json compat inspectExec // --- // tags: @@ -164,6 +170,8 @@ func (s *APIServer) registerExecHandlers(r *mux.Router) error { // 500: // $ref: "#/responses/InternalError" r.Handle(VersionedPath("/exec/{id}/json"), s.APIHandler(handlers.InspectExec)).Methods(http.MethodGet) + // Added non version path to URI to support docker non versioned paths + r.Handle("/exec/{id}/json", s.APIHandler(handlers.InspectExec)).Methods(http.MethodGet) /* libpod api follows diff --git a/pkg/api/server/register_images.go b/pkg/api/server/register_images.go index db04ecdc9..8c75c4d04 100644 --- a/pkg/api/server/register_images.go +++ b/pkg/api/server/register_images.go @@ -48,7 +48,11 @@ func (s *APIServer) registerImagesHandlers(r *mux.Router) error { // 500: // $ref: "#/responses/InternalError" r.Handle(VersionedPath("/images/create"), s.APIHandler(generic.CreateImageFromImage)).Methods(http.MethodPost).Queries("fromImage", "{fromImage}") + // Added non version path to URI to support docker non versioned paths + r.Handle("/images/create", s.APIHandler(generic.CreateImageFromImage)).Methods(http.MethodPost).Queries("fromImage", "{fromImage}") r.Handle(VersionedPath("/images/create"), s.APIHandler(generic.CreateImageFromSrc)).Methods(http.MethodPost).Queries("fromSrc", "{fromSrc}") + // Added non version path to URI to support docker non versioned paths + r.Handle("/images/create", s.APIHandler(generic.CreateImageFromSrc)).Methods(http.MethodPost).Queries("fromSrc", "{fromSrc}") // swagger:operation GET /images/json compat listImages // --- // tags: @@ -84,6 +88,8 @@ func (s *APIServer) registerImagesHandlers(r *mux.Router) error { // 500: // $ref: '#/responses/InternalError' r.Handle(VersionedPath("/images/json"), s.APIHandler(generic.GetImages)).Methods(http.MethodGet) + // Added non version path to URI to support docker non versioned paths + r.Handle("/images/json", s.APIHandler(generic.GetImages)).Methods(http.MethodGet) // swagger:operation POST /images/load compat importImage // --- // tags: @@ -108,6 +114,8 @@ func (s *APIServer) registerImagesHandlers(r *mux.Router) error { // 500: // $ref: '#/responses/InternalError' r.Handle(VersionedPath("/images/load"), s.APIHandler(generic.LoadImages)).Methods(http.MethodPost) + // Added non version path to URI to support docker non versioned paths + r.Handle("/images/load", s.APIHandler(generic.LoadImages)).Methods(http.MethodPost) // swagger:operation POST /images/prune compat pruneImages // --- // tags: @@ -133,6 +141,8 @@ func (s *APIServer) registerImagesHandlers(r *mux.Router) error { // 500: // $ref: '#/responses/InternalError' r.Handle(VersionedPath("/images/prune"), s.APIHandler(generic.PruneImages)).Methods(http.MethodPost) + // Added non version path to URI to support docker non versioned paths + r.Handle("/images/prune", s.APIHandler(generic.PruneImages)).Methods(http.MethodPost) // swagger:operation GET /images/search compat searchImages // 
--- // tags: @@ -166,6 +176,8 @@ func (s *APIServer) registerImagesHandlers(r *mux.Router) error { // 500: // $ref: '#/responses/InternalError' r.Handle(VersionedPath("/images/search"), s.APIHandler(handlers.SearchImages)).Methods(http.MethodGet) + // Added non version path to URI to support docker non versioned paths + r.Handle("/images/search", s.APIHandler(handlers.SearchImages)).Methods(http.MethodGet) // swagger:operation DELETE /images/{name:.*} compat removeImage // --- // tags: @@ -198,6 +210,8 @@ func (s *APIServer) registerImagesHandlers(r *mux.Router) error { // 500: // $ref: '#/responses/InternalError' r.Handle(VersionedPath("/images/{name:.*}"), s.APIHandler(handlers.RemoveImage)).Methods(http.MethodDelete) + // Added non version path to URI to support docker non versioned paths + r.Handle("/images/{name:.*}", s.APIHandler(handlers.RemoveImage)).Methods(http.MethodDelete) // swagger:operation GET /images/{name:.*}/get compat exportImage // --- // tags: @@ -221,6 +235,8 @@ func (s *APIServer) registerImagesHandlers(r *mux.Router) error { // 500: // $ref: '#/responses/InternalError' r.Handle(VersionedPath("/images/{name:.*}/get"), s.APIHandler(generic.ExportImage)).Methods(http.MethodGet) + // Added non version path to URI to support docker non versioned paths + r.Handle("/images/{name:.*}/get", s.APIHandler(generic.ExportImage)).Methods(http.MethodGet) // swagger:operation GET /images/{name:.*}/history compat imageHistory // --- // tags: @@ -243,6 +259,8 @@ func (s *APIServer) registerImagesHandlers(r *mux.Router) error { // 500: // $ref: "#/responses/InternalError" r.Handle(VersionedPath("/images/{name:.*}/history"), s.APIHandler(handlers.HistoryImage)).Methods(http.MethodGet) + // Added non version path to URI to support docker non versioned paths + r.Handle("/images/{name:.*}/history", s.APIHandler(handlers.HistoryImage)).Methods(http.MethodGet) // swagger:operation GET /images/{name:.*}/json compat inspectImage // --- // tags: @@ -265,6 +283,8 @@ func (s *APIServer) registerImagesHandlers(r *mux.Router) error { // 500: // $ref: "#/responses/InternalError" r.Handle(VersionedPath("/images/{name:.*}/json"), s.APIHandler(generic.GetImage)).Methods(http.MethodGet) + // Added non version path to URI to support docker non versioned paths + r.Handle("/images/{name:.*}/json", s.APIHandler(generic.GetImage)).Methods(http.MethodGet) // swagger:operation POST /images/{name:.*}/tag compat tagImage // --- // tags: @@ -299,6 +319,8 @@ func (s *APIServer) registerImagesHandlers(r *mux.Router) error { // 500: // $ref: '#/responses/InternalError' r.Handle(VersionedPath("/images/{name:.*}/tag"), s.APIHandler(handlers.TagImage)).Methods(http.MethodPost) + // Added non version path to URI to support docker non versioned paths + r.Handle("/images/{name:.*}/tag", s.APIHandler(handlers.TagImage)).Methods(http.MethodPost) // swagger:operation POST /commit compat commitContainer // --- // tags: @@ -344,6 +366,8 @@ func (s *APIServer) registerImagesHandlers(r *mux.Router) error { // 500: // $ref: '#/responses/InternalError' r.Handle(VersionedPath("/commit"), s.APIHandler(generic.CommitContainer)).Methods(http.MethodPost) + // Added non version path to URI to support docker non versioned paths + r.Handle("/commit", s.APIHandler(generic.CommitContainer)).Methods(http.MethodPost) // swagger:operation POST /build compat buildImage // --- @@ -554,6 +578,8 @@ func (s *APIServer) registerImagesHandlers(r *mux.Router) error { // 500: // $ref: "#/responses/InternalError" r.Handle(VersionedPath("/build"), 
s.APIHandler(handlers.BuildImage)).Methods(http.MethodPost) + // Added non version path to URI to support docker non versioned paths + r.Handle("/build", s.APIHandler(handlers.BuildImage)).Methods(http.MethodPost) /* libpod endpoints */ @@ -942,6 +968,50 @@ func (s *APIServer) registerImagesHandlers(r *mux.Router) error { // 500: // $ref: '#/responses/InternalError' r.Handle(VersionedPath("/libpod/images/{name:.*}/tag"), s.APIHandler(handlers.TagImage)).Methods(http.MethodPost) - + // swagger:operation POST /commit libpod libpodCommitContainer + // --- + // tags: + // - containers + // summary: Commit + // description: Create a new image from a container + // parameters: + // - in: query + // name: container + // type: string + // description: the name or ID of a container + // - in: query + // name: repo + // type: string + // description: the repository name for the created image + // - in: query + // name: tag + // type: string + // description: tag name for the created image + // - in: query + // name: comment + // type: string + // description: commit message + // - in: query + // name: author + // type: string + // description: author of the image + // - in: query + // name: pause + // type: boolean + // description: pause the container before committing it + // - in: query + // name: changes + // type: string + // description: instructions to apply while committing in Dockerfile format + // produces: + // - application/json + // responses: + // 201: + // description: no error + // 404: + // $ref: '#/responses/NoSuchImage' + // 500: + // $ref: '#/responses/InternalError' + r.Handle(VersionedPath("/commit"), s.APIHandler(generic.CommitContainer)).Methods(http.MethodPost) return nil } diff --git a/pkg/api/server/register_info.go b/pkg/api/server/register_info.go index 36c467cc3..975a19fef 100644 --- a/pkg/api/server/register_info.go +++ b/pkg/api/server/register_info.go @@ -22,5 +22,7 @@ func (s *APIServer) registerInfoHandlers(r *mux.Router) error { // 500: // $ref: "#/responses/InternalError" r.Handle(VersionedPath("/info"), s.APIHandler(generic.GetInfo)).Methods(http.MethodGet) + // Added non version path to URI to support docker non versioned paths + r.Handle("/info", s.APIHandler(generic.GetInfo)).Methods(http.MethodGet) return nil } diff --git a/pkg/api/server/register_monitor.go b/pkg/api/server/register_monitor.go index dbe0d27ce..b821efbaa 100644 --- a/pkg/api/server/register_monitor.go +++ b/pkg/api/server/register_monitor.go @@ -7,5 +7,7 @@ import ( func (s *APIServer) registerMonitorHandlers(r *mux.Router) error { r.Handle(VersionedPath("/monitor"), s.APIHandler(handlers.UnsupportedHandler)) + // Added non version path to URI to support docker non versioned paths + r.Handle("/monitor", s.APIHandler(handlers.UnsupportedHandler)) return nil } diff --git a/pkg/api/server/register_plugins.go b/pkg/api/server/register_plugins.go index 479a79d1f..50026f6ad 100644 --- a/pkg/api/server/register_plugins.go +++ b/pkg/api/server/register_plugins.go @@ -7,5 +7,7 @@ import ( func (s *APIServer) registerPluginsHandlers(r *mux.Router) error { r.Handle(VersionedPath("/plugins"), s.APIHandler(handlers.UnsupportedHandler)) + // Added non version path to URI to support docker non versioned paths + r.Handle("/plugins", s.APIHandler(handlers.UnsupportedHandler)) return nil } diff --git a/pkg/api/server/register_swarm.go b/pkg/api/server/register_swarm.go index e37ac4e41..8a5588268 100644 --- a/pkg/api/server/register_swarm.go +++ b/pkg/api/server/register_swarm.go @@ -16,6 +16,14 @@ func (s 
*APIServer) registerSwarmHandlers(r *mux.Router) error { r.PathPrefix("/v{version:[0-9.]+}/services/").HandlerFunc(noSwarm) r.PathPrefix("/v{version:[0-9.]+}/swarm/").HandlerFunc(noSwarm) r.PathPrefix("/v{version:[0-9.]+}/tasks/").HandlerFunc(noSwarm) + + // Added non version path to URI to support docker non versioned paths + r.PathPrefix("/configs/").HandlerFunc(noSwarm) + r.PathPrefix("/nodes/").HandlerFunc(noSwarm) + r.PathPrefix("/secrets/").HandlerFunc(noSwarm) + r.PathPrefix("/services/").HandlerFunc(noSwarm) + r.PathPrefix("/swarm/").HandlerFunc(noSwarm) + r.PathPrefix("/tasks/").HandlerFunc(noSwarm) return nil } diff --git a/pkg/api/server/register_system.go b/pkg/api/server/register_system.go index 188c1cdac..4776692f5 100644 --- a/pkg/api/server/register_system.go +++ b/pkg/api/server/register_system.go @@ -9,5 +9,7 @@ import ( func (s *APIServer) registerSystemHandlers(r *mux.Router) error { r.Handle(VersionedPath("/system/df"), s.APIHandler(generic.GetDiskUsage)).Methods(http.MethodGet) + // Added non version path to URI to support docker non versioned paths + r.Handle("/system/df", s.APIHandler(generic.GetDiskUsage)).Methods(http.MethodGet) return nil } diff --git a/pkg/api/tags.yaml b/pkg/api/tags.yaml index 3bf2bb64f..571f49e44 100644 --- a/pkg/api/tags.yaml +++ b/pkg/api/tags.yaml @@ -18,4 +18,4 @@ tags: - name: images (compat) description: Actions related to images for the compatibility endpoints - name: system (compat) - description: Actions related to Podman and compatiblity engines + description: Actions related to Podman and compatibility engines diff --git a/pkg/apparmor/apparmor.go b/pkg/apparmor/apparmor.go index 45c029c07..1e824550d 100644 --- a/pkg/apparmor/apparmor.go +++ b/pkg/apparmor/apparmor.go @@ -2,6 +2,7 @@ package apparmor import ( "errors" + libpodVersion "github.com/containers/libpod/version" ) diff --git a/pkg/bindings/containers/create.go b/pkg/bindings/containers/create.go index 43a3ef02d..495f9db49 100644 --- a/pkg/bindings/containers/create.go +++ b/pkg/bindings/containers/create.go @@ -11,7 +11,7 @@ import ( jsoniter "github.com/json-iterator/go" ) -func CreateWithSpec(ctx context.Context, s specgen.SpecGenerator) (utils.ContainerCreateResponse, error) { +func CreateWithSpec(ctx context.Context, s *specgen.SpecGenerator) (utils.ContainerCreateResponse, error) { var ccr utils.ContainerCreateResponse conn, err := bindings.GetClient(ctx) if err != nil { diff --git a/pkg/bindings/containers/healthcheck.go b/pkg/bindings/containers/healthcheck.go index dc607c1b3..3f94fad01 100644 --- a/pkg/bindings/containers/healthcheck.go +++ b/pkg/bindings/containers/healthcheck.go @@ -2,10 +2,10 @@ package containers import ( "context" - "github.com/containers/libpod/pkg/bindings" "net/http" "github.com/containers/libpod/libpod" + "github.com/containers/libpod/pkg/bindings" ) // RunHealthCheck executes the container's healthcheck and returns the health status of the diff --git a/pkg/bindings/test/common_test.go b/pkg/bindings/test/common_test.go index 1fc774074..a4d065a14 100644 --- a/pkg/bindings/test/common_test.go +++ b/pkg/bindings/test/common_test.go @@ -1,6 +1,7 @@ package test_bindings import ( + "context" "fmt" "io/ioutil" "os" @@ -8,6 +9,9 @@ import ( "path/filepath" "strings" + . 
"github.com/containers/libpod/pkg/bindings" + "github.com/containers/libpod/pkg/bindings/containers" + "github.com/containers/libpod/pkg/specgen" "github.com/onsi/ginkgo" "github.com/onsi/gomega/gexec" "github.com/pkg/errors" @@ -55,6 +59,16 @@ type bindingTest struct { tempDirPath string runRoot string crioRoot string + conn context.Context +} + +func (b *bindingTest) NewConnection() error { + connText, err := NewConnection(context.Background(), b.sock) + if err != nil { + return err + } + b.conn = connText + return nil } func (b *bindingTest) runPodman(command []string) *gexec.Session { @@ -173,17 +187,27 @@ func (b *bindingTest) restoreImageFromCache(i testImage) { // Run a container within or without a pod // and add or append the alpine image to it -func (b *bindingTest) RunTopContainer(containerName *string, insidePod *bool, podName *string) { - cmd := []string{"run", "-dt"} +func (b *bindingTest) RunTopContainer(containerName *string, insidePod *bool, podName *string) (string, error) { + s := specgen.NewSpecGenerator(alpine.name) + s.Terminal = false + s.Command = []string{"top"} + if containerName != nil { + s.Name = *containerName + } if insidePod != nil && podName != nil { - pName := *podName - cmd = append(cmd, "--pod", pName) - } else if containerName != nil { - cName := *containerName - cmd = append(cmd, "--name", cName) - } - cmd = append(cmd, alpine.name, "top") - b.runPodman(cmd).Wait(45) + s.Pod = *podName + } + ctr, err := containers.CreateWithSpec(b.conn, s) + if err != nil { + return "", nil + } + err = containers.Start(b.conn, ctr.ID, nil) + if err != nil { + return "", err + } + waiting := "running" + _, err = containers.Wait(b.conn, ctr.ID, &waiting) + return ctr.ID, err } // This method creates a pod with the given pod name. 
diff --git a/pkg/bindings/test/containers_test.go b/pkg/bindings/test/containers_test.go index 299a78ac2..e7ef620d4 100644 --- a/pkg/bindings/test/containers_test.go +++ b/pkg/bindings/test/containers_test.go @@ -1,7 +1,6 @@ package test_bindings import ( - "context" "net/http" "strconv" "time" @@ -18,7 +17,6 @@ var _ = Describe("Podman containers ", func() { var ( bt *bindingTest s *gexec.Session - connText context.Context err error falseFlag bool = false trueFlag bool = true @@ -29,18 +27,18 @@ var _ = Describe("Podman containers ", func() { bt.RestoreImagesFromCache() s = bt.startAPIService() time.Sleep(1 * time.Second) - connText, err = bindings.NewConnection(context.Background(), bt.sock) + err := bt.NewConnection() Expect(err).To(BeNil()) }) AfterEach(func() { s.Kill() - bt.cleanup() + //bt.cleanup() }) It("podman pause a bogus container", func() { // Pausing bogus container should return 404 - err = containers.Pause(connText, "foobar") + err = containers.Pause(bt.conn, "foobar") Expect(err).ToNot(BeNil()) code, _ := bindings.CheckResponseCode(err) Expect(code).To(BeNumerically("==", http.StatusNotFound)) @@ -48,7 +46,7 @@ var _ = Describe("Podman containers ", func() { It("podman unpause a bogus container", func() { // Unpausing bogus container should return 404 - err = containers.Unpause(connText, "foobar") + err = containers.Unpause(bt.conn, "foobar") Expect(err).ToNot(BeNil()) code, _ := bindings.CheckResponseCode(err) Expect(code).To(BeNumerically("==", http.StatusNotFound)) @@ -57,12 +55,13 @@ var _ = Describe("Podman containers ", func() { It("podman pause a running container by name", func() { // Pausing by name should work var name = "top" - bt.RunTopContainer(&name, &falseFlag, nil) - err := containers.Pause(connText, name) + _, err := bt.RunTopContainer(&name, &falseFlag, nil) + Expect(err).To(BeNil()) + err = containers.Pause(bt.conn, name) Expect(err).To(BeNil()) // Ensure container is paused - data, err := containers.Inspect(connText, name, nil) + data, err := containers.Inspect(bt.conn, name, nil) Expect(err).To(BeNil()) Expect(data.State.Status).To(Equal("paused")) }) @@ -70,54 +69,60 @@ var _ = Describe("Podman containers ", func() { It("podman pause a running container by id", func() { // Pausing by id should work var name = "top" - bt.RunTopContainer(&name, &falseFlag, nil) - data, err := containers.Inspect(connText, name, nil) + cid, err := bt.RunTopContainer(&name, &falseFlag, nil) Expect(err).To(BeNil()) - err = containers.Pause(connText, data.ID) + err = containers.Pause(bt.conn, cid) Expect(err).To(BeNil()) // Ensure container is paused - data, err = containers.Inspect(connText, data.ID, nil) + data, err := containers.Inspect(bt.conn, cid, nil) + Expect(err).To(BeNil()) Expect(data.State.Status).To(Equal("paused")) }) It("podman unpause a running container by name", func() { // Unpausing by name should work var name = "top" - bt.RunTopContainer(&name, &falseFlag, nil) - err := containers.Pause(connText, name) + _, err := bt.RunTopContainer(&name, &falseFlag, nil) Expect(err).To(BeNil()) - err = containers.Unpause(connText, name) + err = containers.Pause(bt.conn, name) + Expect(err).To(BeNil()) + err = containers.Unpause(bt.conn, name) Expect(err).To(BeNil()) // Ensure container is unpaused - data, err := containers.Inspect(connText, name, nil) + data, err := containers.Inspect(bt.conn, name, nil) + Expect(err).To(BeNil()) Expect(data.State.Status).To(Equal("running")) }) It("podman unpause a running container by ID", func() { // Unpausing by ID should work var 
name = "top" - bt.RunTopContainer(&name, &falseFlag, nil) - // Pause by name - err := containers.Pause(connText, name) - data, err := containers.Inspect(connText, name, nil) + _, err := bt.RunTopContainer(&name, &falseFlag, nil) Expect(err).To(BeNil()) - err = containers.Unpause(connText, data.ID) + // Pause by name + err = containers.Pause(bt.conn, name) + //paused := "paused" + //_, err = containers.Wait(bt.conn, cid, &paused) + //Expect(err).To(BeNil()) + err = containers.Unpause(bt.conn, name) Expect(err).To(BeNil()) // Ensure container is unpaused - data, err = containers.Inspect(connText, name, nil) + data, err := containers.Inspect(bt.conn, name, nil) + Expect(err).To(BeNil()) Expect(data.State.Status).To(Equal("running")) }) It("podman pause a paused container by name", func() { // Pausing a paused container by name should fail var name = "top" - bt.RunTopContainer(&name, &falseFlag, nil) - err := containers.Pause(connText, name) + _, err := bt.RunTopContainer(&name, &falseFlag, nil) Expect(err).To(BeNil()) - err = containers.Pause(connText, name) + err = containers.Pause(bt.conn, name) + Expect(err).To(BeNil()) + err = containers.Pause(bt.conn, name) Expect(err).ToNot(BeNil()) code, _ := bindings.CheckResponseCode(err) Expect(code).To(BeNumerically("==", http.StatusInternalServerError)) @@ -126,12 +131,11 @@ var _ = Describe("Podman containers ", func() { It("podman pause a paused container by id", func() { // Pausing a paused container by id should fail var name = "top" - bt.RunTopContainer(&name, &falseFlag, nil) - data, err := containers.Inspect(connText, name, nil) + cid, err := bt.RunTopContainer(&name, &falseFlag, nil) Expect(err).To(BeNil()) - err = containers.Pause(connText, data.ID) + err = containers.Pause(bt.conn, cid) Expect(err).To(BeNil()) - err = containers.Pause(connText, data.ID) + err = containers.Pause(bt.conn, cid) Expect(err).ToNot(BeNil()) code, _ := bindings.CheckResponseCode(err) Expect(code).To(BeNumerically("==", http.StatusInternalServerError)) @@ -140,10 +144,11 @@ var _ = Describe("Podman containers ", func() { It("podman pause a stopped container by name", func() { // Pausing a stopped container by name should fail var name = "top" - bt.RunTopContainer(&name, &falseFlag, nil) - err := containers.Stop(connText, name, nil) + _, err := bt.RunTopContainer(&name, &falseFlag, nil) + Expect(err).To(BeNil()) + err = containers.Stop(bt.conn, name, nil) Expect(err).To(BeNil()) - err = containers.Pause(connText, name) + err = containers.Pause(bt.conn, name) Expect(err).ToNot(BeNil()) code, _ := bindings.CheckResponseCode(err) Expect(code).To(BeNumerically("==", http.StatusInternalServerError)) @@ -152,11 +157,11 @@ var _ = Describe("Podman containers ", func() { It("podman pause a stopped container by id", func() { // Pausing a stopped container by id should fail var name = "top" - bt.RunTopContainer(&name, &falseFlag, nil) - data, err := containers.Inspect(connText, name, nil) - err = containers.Stop(connText, data.ID, nil) + cid, err := bt.RunTopContainer(&name, &falseFlag, nil) Expect(err).To(BeNil()) - err = containers.Pause(connText, data.ID) + err = containers.Stop(bt.conn, cid, nil) + Expect(err).To(BeNil()) + err = containers.Pause(bt.conn, cid) Expect(err).ToNot(BeNil()) code, _ := bindings.CheckResponseCode(err) Expect(code).To(BeNumerically("==", http.StatusInternalServerError)) @@ -165,12 +170,11 @@ var _ = Describe("Podman containers ", func() { It("podman remove a paused container by id without force", func() { // Removing a paused container 
without force should fail var name = "top" - bt.RunTopContainer(&name, &falseFlag, nil) - data, err := containers.Inspect(connText, name, nil) + cid, err := bt.RunTopContainer(&name, &falseFlag, nil) Expect(err).To(BeNil()) - err = containers.Pause(connText, data.ID) + err = containers.Pause(bt.conn, cid) Expect(err).To(BeNil()) - err = containers.Remove(connText, data.ID, &falseFlag, &falseFlag) + err = containers.Remove(bt.conn, cid, &falseFlag, &falseFlag) Expect(err).ToNot(BeNil()) code, _ := bindings.CheckResponseCode(err) Expect(code).To(BeNumerically("==", http.StatusInternalServerError)) @@ -187,22 +191,22 @@ var _ = Describe("Podman containers ", func() { // Removing a paused container with force should work var name = "top" - bt.RunTopContainer(&name, &falseFlag, nil) - data, err := containers.Inspect(connText, name, nil) + cid, err := bt.RunTopContainer(&name, &falseFlag, nil) Expect(err).To(BeNil()) - err = containers.Pause(connText, data.ID) + err = containers.Pause(bt.conn, cid) Expect(err).To(BeNil()) - err = containers.Remove(connText, data.ID, &trueFlag, &falseFlag) + err = containers.Remove(bt.conn, cid, &trueFlag, &falseFlag) Expect(err).To(BeNil()) }) It("podman stop a paused container by name", func() { // Stopping a paused container by name should fail var name = "top" - bt.RunTopContainer(&name, &falseFlag, nil) - err := containers.Pause(connText, name) + _, err := bt.RunTopContainer(&name, &falseFlag, nil) + Expect(err).To(BeNil()) + err = containers.Pause(bt.conn, name) Expect(err).To(BeNil()) - err = containers.Stop(connText, name, nil) + err = containers.Stop(bt.conn, name, nil) Expect(err).ToNot(BeNil()) code, _ := bindings.CheckResponseCode(err) Expect(code).To(BeNumerically("==", http.StatusInternalServerError)) @@ -211,12 +215,11 @@ var _ = Describe("Podman containers ", func() { It("podman stop a paused container by id", func() { // Stopping a paused container by id should fail var name = "top" - bt.RunTopContainer(&name, &falseFlag, nil) - data, err := containers.Inspect(connText, name, nil) + cid, err := bt.RunTopContainer(&name, &falseFlag, nil) Expect(err).To(BeNil()) - err = containers.Pause(connText, data.ID) + err = containers.Pause(bt.conn, cid) Expect(err).To(BeNil()) - err = containers.Stop(connText, data.ID, nil) + err = containers.Stop(bt.conn, cid, nil) Expect(err).ToNot(BeNil()) code, _ := bindings.CheckResponseCode(err) Expect(code).To(BeNumerically("==", http.StatusInternalServerError)) @@ -225,12 +228,13 @@ var _ = Describe("Podman containers ", func() { It("podman stop a running container by name", func() { // Stopping a running container by name should work var name = "top" - bt.RunTopContainer(&name, &falseFlag, nil) - err := containers.Stop(connText, name, nil) + _, err := bt.RunTopContainer(&name, &falseFlag, nil) + Expect(err).To(BeNil()) + err = containers.Stop(bt.conn, name, nil) Expect(err).To(BeNil()) // Ensure container is stopped - data, err := containers.Inspect(connText, name, nil) + data, err := containers.Inspect(bt.conn, name, nil) Expect(err).To(BeNil()) Expect(isStopped(data.State.Status)).To(BeTrue()) }) @@ -238,14 +242,13 @@ var _ = Describe("Podman containers ", func() { It("podman stop a running container by ID", func() { // Stopping a running container by ID should work var name = "top" - bt.RunTopContainer(&name, &falseFlag, nil) - data, err := containers.Inspect(connText, name, nil) + cid, err := bt.RunTopContainer(&name, &falseFlag, nil) Expect(err).To(BeNil()) - err = containers.Stop(connText, data.ID, nil) + err 
= containers.Stop(bt.conn, cid, nil) Expect(err).To(BeNil()) // Ensure container is stopped - data, err = containers.Inspect(connText, name, nil) + data, err := containers.Inspect(bt.conn, name, nil) Expect(err).To(BeNil()) Expect(isStopped(data.State.Status)).To(BeTrue()) }) @@ -255,19 +258,20 @@ var _ = Describe("Podman containers ", func() { name = "top" exitCode int32 = -1 ) - _, err := containers.Wait(connText, "foobar", nil) + _, err := containers.Wait(bt.conn, "foobar", nil) Expect(err).ToNot(BeNil()) code, _ := bindings.CheckResponseCode(err) Expect(code).To(BeNumerically("==", http.StatusNotFound)) errChan := make(chan error) - bt.RunTopContainer(&name, nil, nil) + _, err = bt.RunTopContainer(&name, nil, nil) + Expect(err).To(BeNil()) go func() { - exitCode, err = containers.Wait(connText, name, nil) + exitCode, err = containers.Wait(bt.conn, name, nil) errChan <- err close(errChan) }() - err = containers.Stop(connText, name, nil) + err = containers.Stop(bt.conn, name, nil) Expect(err).To(BeNil()) wait := <-errChan Expect(wait).To(BeNil()) @@ -282,13 +286,14 @@ var _ = Describe("Podman containers ", func() { unpause = "running" ) errChan := make(chan error) - bt.RunTopContainer(&name, nil, nil) + _, err := bt.RunTopContainer(&name, nil, nil) + Expect(err).To(BeNil()) go func() { - exitCode, err = containers.Wait(connText, name, &pause) + exitCode, err = containers.Wait(bt.conn, name, &pause) errChan <- err close(errChan) }() - err := containers.Pause(connText, name) + err = containers.Pause(bt.conn, name) Expect(err).To(BeNil()) wait := <-errChan Expect(wait).To(BeNil()) @@ -296,11 +301,11 @@ var _ = Describe("Podman containers ", func() { errChan = make(chan error) go func() { - exitCode, err = containers.Wait(connText, name, &unpause) + exitCode, err = containers.Wait(bt.conn, name, &unpause) errChan <- err close(errChan) }() - err = containers.Unpause(connText, name) + err = containers.Unpause(bt.conn, name) Expect(err).To(BeNil()) unPausewait := <-errChan Expect(unPausewait).To(BeNil()) diff --git a/pkg/bindings/test/create_test.go b/pkg/bindings/test/create_test.go new file mode 100644 index 000000000..f83a9b14d --- /dev/null +++ b/pkg/bindings/test/create_test.go @@ -0,0 +1,50 @@ +package test_bindings + +import ( + "time" + + "github.com/containers/libpod/pkg/bindings/containers" + "github.com/containers/libpod/pkg/specgen" + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + "github.com/onsi/gomega/gexec" +) + +var _ = Describe("Create containers ", func() { + var ( + bt *bindingTest + s *gexec.Session + ) + + BeforeEach(func() { + bt = newBindingTest() + bt.RestoreImagesFromCache() + s = bt.startAPIService() + time.Sleep(1 * time.Second) + err := bt.NewConnection() + Expect(err).To(BeNil()) + }) + + AfterEach(func() { + s.Kill() + bt.cleanup() + }) + + It("create a container running top", func() { + s := specgen.NewSpecGenerator(alpine.name) + s.Command = []string{"top"} + s.Terminal = true + s.Name = "top" + ctr, err := containers.CreateWithSpec(bt.conn, s) + Expect(err).To(BeNil()) + data, err := containers.Inspect(bt.conn, ctr.ID, nil) + Expect(err).To(BeNil()) + Expect(data.Name).To(Equal("top")) + err = containers.Start(bt.conn, ctr.ID, nil) + Expect(err).To(BeNil()) + data, err = containers.Inspect(bt.conn, ctr.ID, nil) + Expect(err).To(BeNil()) + Expect(data.State.Status).To(Equal("running")) + }) + +}) diff --git a/pkg/bindings/test/images_test.go b/pkg/bindings/test/images_test.go index 8eef28502..17b3b254a 100644 --- a/pkg/bindings/test/images_test.go +++ b/pkg/bindings/test/images_test.go @@ -1,7 +1,6 @@ package test_bindings import ( - "context" "net/http" "os" "path/filepath" @@ -22,7 +21,6 @@ var _ = Describe("Podman images", func() { //podmanTest *PodmanTestIntegration bt *bindingTest s *gexec.Session - connText context.Context err error falseFlag bool = false trueFlag bool = true @@ -40,7 +38,7 @@ var _ = Describe("Podman images", func() { bt.RestoreImagesFromCache() s = bt.startAPIService() time.Sleep(1 * time.Second) - connText, err = bindings.NewConnection(context.Background(), bt.sock) + err := bt.NewConnection() Expect(err).To(BeNil()) }) @@ -53,32 +51,32 @@ var _ = Describe("Podman images", func() { }) It("inspect image", func() { // Inspect invalid image be 404 - _, err = images.GetImage(connText, "foobar5000", nil) + _, err = images.GetImage(bt.conn, "foobar5000", nil) Expect(err).ToNot(BeNil()) code, _ := bindings.CheckResponseCode(err) Expect(code).To(BeNumerically("==", http.StatusNotFound)) // Inspect by short name - data, err := images.GetImage(connText, alpine.shortName, nil) + data, err := images.GetImage(bt.conn, alpine.shortName, nil) Expect(err).To(BeNil()) // Inspect with full ID - _, err = images.GetImage(connText, data.ID, nil) + _, err = images.GetImage(bt.conn, data.ID, nil) Expect(err).To(BeNil()) // Inspect with partial ID - _, err = images.GetImage(connText, data.ID[0:12], nil) + _, err = images.GetImage(bt.conn, data.ID[0:12], nil) Expect(err).To(BeNil()) // Inspect by long name - _, err = images.GetImage(connText, alpine.name, nil) + _, err = images.GetImage(bt.conn, alpine.name, nil) Expect(err).To(BeNil()) // TODO it looks like the images API alwaays returns size regardless // of bool or not. What should we do ? 
//Expect(data.Size).To(BeZero()) // Enabling the size parameter should result in size being populated - data, err = images.GetImage(connText, alpine.name, &trueFlag) + data, err = images.GetImage(bt.conn, alpine.name, &trueFlag) Expect(err).To(BeNil()) Expect(data.Size).To(BeNumerically(">", 0)) }) @@ -86,49 +84,50 @@ var _ = Describe("Podman images", func() { // Test to validate the remove image api It("remove image", func() { // Remove invalid image should be a 404 - _, err = images.Remove(connText, "foobar5000", &falseFlag) + _, err = images.Remove(bt.conn, "foobar5000", &falseFlag) Expect(err).ToNot(BeNil()) code, _ := bindings.CheckResponseCode(err) Expect(code).To(BeNumerically("==", http.StatusNotFound)) // Remove an image by name, validate image is removed and error is nil - inspectData, err := images.GetImage(connText, busybox.shortName, nil) + inspectData, err := images.GetImage(bt.conn, busybox.shortName, nil) Expect(err).To(BeNil()) - response, err := images.Remove(connText, busybox.shortName, nil) + response, err := images.Remove(bt.conn, busybox.shortName, nil) Expect(err).To(BeNil()) Expect(inspectData.ID).To(Equal(response[0]["Deleted"])) - inspectData, err = images.GetImage(connText, busybox.shortName, nil) + inspectData, err = images.GetImage(bt.conn, busybox.shortName, nil) code, _ = bindings.CheckResponseCode(err) Expect(code).To(BeNumerically("==", http.StatusNotFound)) // Start a container with alpine image var top string = "top" - bt.RunTopContainer(&top, &falseFlag, nil) + _, err = bt.RunTopContainer(&top, &falseFlag, nil) + Expect(err).To(BeNil()) // we should now have a container called "top" running - containerResponse, err := containers.Inspect(connText, "top", &falseFlag) + containerResponse, err := containers.Inspect(bt.conn, "top", &falseFlag) Expect(err).To(BeNil()) Expect(containerResponse.Name).To(Equal("top")) // try to remove the image "alpine". This should fail since we are not force // deleting hence image cannot be deleted until the container is deleted. - response, err = images.Remove(connText, alpine.shortName, &falseFlag) + response, err = images.Remove(bt.conn, alpine.shortName, &falseFlag) code, _ = bindings.CheckResponseCode(err) Expect(code).To(BeNumerically("==", http.StatusInternalServerError)) // Removing the image "alpine" where force = true - response, err = images.Remove(connText, alpine.shortName, &trueFlag) + response, err = images.Remove(bt.conn, alpine.shortName, &trueFlag) Expect(err).To(BeNil()) // Checking if both the images are gone as well as the container is deleted - inspectData, err = images.GetImage(connText, busybox.shortName, nil) + inspectData, err = images.GetImage(bt.conn, busybox.shortName, nil) code, _ = bindings.CheckResponseCode(err) Expect(code).To(BeNumerically("==", http.StatusNotFound)) - inspectData, err = images.GetImage(connText, alpine.shortName, nil) + inspectData, err = images.GetImage(bt.conn, alpine.shortName, nil) code, _ = bindings.CheckResponseCode(err) Expect(code).To(BeNumerically("==", http.StatusNotFound)) - _, err = containers.Inspect(connText, "top", &falseFlag) + _, err = containers.Inspect(bt.conn, "top", &falseFlag) code, _ = bindings.CheckResponseCode(err) Expect(code).To(BeNumerically("==", http.StatusNotFound)) }) @@ -136,17 +135,17 @@ var _ = Describe("Podman images", func() { // Tests to validate the image tag command. It("tag image", func() { // Validates if invalid image name is given a bad response is encountered. 
- err = images.Tag(connText, "dummy", "demo", alpine.shortName) + err = images.Tag(bt.conn, "dummy", "demo", alpine.shortName) Expect(err).ToNot(BeNil()) code, _ := bindings.CheckResponseCode(err) Expect(code).To(BeNumerically("==", http.StatusNotFound)) - // Validates if the image is tagged sucessfully. - err = images.Tag(connText, alpine.shortName, "demo", alpine.shortName) + // Validates if the image is tagged successfully. + err = images.Tag(bt.conn, alpine.shortName, "demo", alpine.shortName) Expect(err).To(BeNil()) //Validates if name updates when the image is retagged. - _, err := images.GetImage(connText, "alpine:demo", nil) + _, err := images.GetImage(bt.conn, "alpine:demo", nil) Expect(err).To(BeNil()) }) @@ -154,7 +153,7 @@ var _ = Describe("Podman images", func() { // Test to validate the List images command. It("List image", func() { // Array to hold the list of images returned - imageSummary, err := images.List(connText, nil, nil) + imageSummary, err := images.List(bt.conn, nil, nil) // There Should be no errors in the response. Expect(err).To(BeNil()) // Since in the begin context two images are created the @@ -164,7 +163,7 @@ var _ = Describe("Podman images", func() { // Adding one more image. There Should be no errors in the response. // And the count should be three now. bt.Pull("busybox:glibc") - imageSummary, err = images.List(connText, nil, nil) + imageSummary, err = images.List(bt.conn, nil, nil) Expect(err).To(BeNil()) Expect(len(imageSummary)).To(Equal(3)) @@ -179,13 +178,13 @@ var _ = Describe("Podman images", func() { // List images with a filter filters := make(map[string][]string) filters["reference"] = []string{alpine.name} - filteredImages, err := images.List(connText, &falseFlag, filters) + filteredImages, err := images.List(bt.conn, &falseFlag, filters) Expect(err).To(BeNil()) Expect(len(filteredImages)).To(BeNumerically("==", 1)) // List images with a bad filter filters["name"] = []string{alpine.name} - _, err = images.List(connText, &falseFlag, filters) + _, err = images.List(bt.conn, &falseFlag, filters) Expect(err).ToNot(BeNil()) code, _ := bindings.CheckResponseCode(err) Expect(code).To(BeNumerically("==", http.StatusInternalServerError)) @@ -193,64 +192,64 @@ var _ = Describe("Podman images", func() { It("Image Exists", func() { // exists on bogus image should be false, with no error - exists, err := images.Exists(connText, "foobar") + exists, err := images.Exists(bt.conn, "foobar") Expect(err).To(BeNil()) Expect(exists).To(BeFalse()) // exists with shortname should be true - exists, err = images.Exists(connText, alpine.shortName) + exists, err = images.Exists(bt.conn, alpine.shortName) Expect(err).To(BeNil()) Expect(exists).To(BeTrue()) // exists with fqname should be true - exists, err = images.Exists(connText, alpine.name) + exists, err = images.Exists(bt.conn, alpine.name) Expect(err).To(BeNil()) Expect(exists).To(BeTrue()) }) It("Load|Import Image", func() { // load an image - _, err := images.Remove(connText, alpine.name, nil) + _, err := images.Remove(bt.conn, alpine.name, nil) Expect(err).To(BeNil()) - exists, err := images.Exists(connText, alpine.name) + exists, err := images.Exists(bt.conn, alpine.name) Expect(err).To(BeNil()) Expect(exists).To(BeFalse()) f, err := os.Open(filepath.Join(ImageCacheDir, alpine.tarballName)) defer f.Close() Expect(err).To(BeNil()) - names, err := images.Load(connText, f, nil) + names, err := images.Load(bt.conn, f, nil) Expect(err).To(BeNil()) Expect(names).To(Equal(alpine.name)) - exists, err = 
images.Exists(connText, alpine.name) + exists, err = images.Exists(bt.conn, alpine.name) Expect(err).To(BeNil()) Expect(exists).To(BeTrue()) // load with a repo name f, err = os.Open(filepath.Join(ImageCacheDir, alpine.tarballName)) Expect(err).To(BeNil()) - _, err = images.Remove(connText, alpine.name, nil) + _, err = images.Remove(bt.conn, alpine.name, nil) Expect(err).To(BeNil()) - exists, err = images.Exists(connText, alpine.name) + exists, err = images.Exists(bt.conn, alpine.name) Expect(err).To(BeNil()) Expect(exists).To(BeFalse()) newName := "quay.io/newname:fizzle" - names, err = images.Load(connText, f, &newName) + names, err = images.Load(bt.conn, f, &newName) Expect(err).To(BeNil()) Expect(names).To(Equal(alpine.name)) - exists, err = images.Exists(connText, newName) + exists, err = images.Exists(bt.conn, newName) Expect(err).To(BeNil()) Expect(exists).To(BeTrue()) // load with a bad repo name should trigger a 500 f, err = os.Open(filepath.Join(ImageCacheDir, alpine.tarballName)) Expect(err).To(BeNil()) - _, err = images.Remove(connText, alpine.name, nil) + _, err = images.Remove(bt.conn, alpine.name, nil) Expect(err).To(BeNil()) - exists, err = images.Exists(connText, alpine.name) + exists, err = images.Exists(bt.conn, alpine.name) Expect(err).To(BeNil()) Expect(exists).To(BeFalse()) badName := "quay.io/newName:fizzle" - _, err = images.Load(connText, f, &badName) + _, err = images.Load(bt.conn, f, &badName) Expect(err).ToNot(BeNil()) code, _ := bindings.CheckResponseCode(err) Expect(code).To(BeNumerically("==", http.StatusInternalServerError)) @@ -262,7 +261,7 @@ var _ = Describe("Podman images", func() { w, err := os.Create(filepath.Join(bt.tempDirPath, alpine.tarballName)) defer w.Close() Expect(err).To(BeNil()) - err = images.Export(connText, alpine.name, w, nil, nil) + err = images.Export(bt.conn, alpine.name, w, nil, nil) Expect(err).To(BeNil()) _, err = os.Stat(exportPath) Expect(err).To(BeNil()) @@ -272,9 +271,9 @@ var _ = Describe("Podman images", func() { It("Import Image", func() { // load an image - _, err = images.Remove(connText, alpine.name, nil) + _, err = images.Remove(bt.conn, alpine.name, nil) Expect(err).To(BeNil()) - exists, err := images.Exists(connText, alpine.name) + exists, err := images.Exists(bt.conn, alpine.name) Expect(err).To(BeNil()) Expect(exists).To(BeFalse()) f, err := os.Open(filepath.Join(ImageCacheDir, alpine.tarballName)) @@ -282,27 +281,27 @@ var _ = Describe("Podman images", func() { Expect(err).To(BeNil()) changes := []string{"CMD /bin/foobar"} testMessage := "test_import" - _, err = images.Import(connText, changes, &testMessage, &alpine.name, nil, f) + _, err = images.Import(bt.conn, changes, &testMessage, &alpine.name, nil, f) Expect(err).To(BeNil()) - exists, err = images.Exists(connText, alpine.name) + exists, err = images.Exists(bt.conn, alpine.name) Expect(err).To(BeNil()) Expect(exists).To(BeTrue()) - data, err := images.GetImage(connText, alpine.name, nil) + data, err := images.GetImage(bt.conn, alpine.name, nil) Expect(err).To(BeNil()) Expect(data.Comment).To(Equal(testMessage)) }) It("History Image", func() { // a bogus name should return a 404 - _, err := images.History(connText, "foobar") + _, err := images.History(bt.conn, "foobar") Expect(err).To(Not(BeNil())) code, _ := bindings.CheckResponseCode(err) Expect(code).To(BeNumerically("==", http.StatusNotFound)) var foundID bool - data, err := images.GetImage(connText, alpine.name, nil) + data, err := images.GetImage(bt.conn, alpine.name, nil) Expect(err).To(BeNil()) - 
history, err := images.History(connText, alpine.name) + history, err := images.History(bt.conn, alpine.name) Expect(err).To(BeNil()) for _, i := range history { if i.ID == data.ID { @@ -314,7 +313,7 @@ var _ = Describe("Podman images", func() { }) It("Search for an image", func() { - imgs, err := images.Search(connText, "alpine", nil, nil) + imgs, err := images.Search(bt.conn, "alpine", nil, nil) Expect(err).To(BeNil()) Expect(len(imgs)).To(BeNumerically(">", 1)) var foundAlpine bool @@ -328,21 +327,21 @@ var _ = Describe("Podman images", func() { // Search for alpine with a limit of 10 ten := 10 - imgs, err = images.Search(connText, "docker.io/alpine", &ten, nil) + imgs, err = images.Search(bt.conn, "docker.io/alpine", &ten, nil) Expect(err).To(BeNil()) Expect(len(imgs)).To(BeNumerically("<=", 10)) // Search for alpine with stars greater than 100 filters := make(map[string][]string) filters["stars"] = []string{"100"} - imgs, err = images.Search(connText, "docker.io/alpine", nil, filters) + imgs, err = images.Search(bt.conn, "docker.io/alpine", nil, filters) Expect(err).To(BeNil()) for _, i := range imgs { Expect(i.Stars).To(BeNumerically(">=", 100)) } // Search with a fqdn - imgs, err = images.Search(connText, "quay.io/libpod/alpine_nginx", nil, nil) + imgs, err = images.Search(bt.conn, "quay.io/libpod/alpine_nginx", nil, nil) Expect(len(imgs)).To(BeNumerically(">=", 1)) }) diff --git a/pkg/bindings/test/pods_test.go b/pkg/bindings/test/pods_test.go index afffee4e6..7e29265b7 100644 --- a/pkg/bindings/test/pods_test.go +++ b/pkg/bindings/test/pods_test.go @@ -1,7 +1,6 @@ package test_bindings import ( - "context" "net/http" "time" @@ -17,7 +16,6 @@ var _ = Describe("Podman pods", func() { var ( bt *bindingTest s *gexec.Session - connText context.Context newpod string err error trueFlag bool = true @@ -30,7 +28,7 @@ var _ = Describe("Podman pods", func() { bt.Podcreate(&newpod) s = bt.startAPIService() time.Sleep(1 * time.Second) - connText, err = bindings.NewConnection(context.Background(), bt.sock) + err := bt.NewConnection() Expect(err).To(BeNil()) }) @@ -41,13 +39,13 @@ var _ = Describe("Podman pods", func() { It("inspect pod", func() { //Inspect an invalid pod name - _, err := pods.Inspect(connText, "dummyname") + _, err := pods.Inspect(bt.conn, "dummyname") Expect(err).ToNot(BeNil()) code, _ := bindings.CheckResponseCode(err) Expect(code).To(BeNumerically("==", http.StatusNotFound)) //Inspect an valid pod name - response, err := pods.Inspect(connText, newpod) + response, err := pods.Inspect(bt.conn, newpod) Expect(err).To(BeNil()) Expect(response.Config.Name).To(Equal(newpod)) }) @@ -55,12 +53,13 @@ var _ = Describe("Podman pods", func() { // Test validates the list all api returns It("list pod", func() { //List all the pods in the current instance - podSummary, err := pods.List(connText, nil) + podSummary, err := pods.List(bt.conn, nil) Expect(err).To(BeNil()) Expect(len(podSummary)).To(Equal(1)) // Adding an alpine container to the existing pod - bt.RunTopContainer(nil, &trueFlag, &newpod) - podSummary, err = pods.List(connText, nil) + _, err = bt.RunTopContainer(nil, &trueFlag, &newpod) + Expect(err).To(BeNil()) + podSummary, err = pods.List(bt.conn, nil) // Verify no errors. Expect(err).To(BeNil()) // Verify number of containers in the pod. @@ -69,7 +68,7 @@ var _ = Describe("Podman pods", func() { // Add multiple pods and verify them by name and size. 
var newpod2 string = "newpod2" bt.Podcreate(&newpod2) - podSummary, err = pods.List(connText, nil) + podSummary, err = pods.List(bt.conn, nil) Expect(len(podSummary)).To(Equal(2)) var names []string for _, i := range podSummary { @@ -83,19 +82,19 @@ var _ = Describe("Podman pods", func() { // Validate list pod with filters //filters := make(map[string][]string) //filters["name"] = []string{newpod} - //filteredPods, err := pods.List(connText, filters) + //filteredPods, err := pods.List(bt.conn, filters) //Expect(err).To(BeNil()) //Expect(len(filteredPods)).To(BeNumerically("==", 1)) }) // The test validates if the exists responds It("exists pod", func() { - response, err := pods.Exists(connText, "dummyName") + response, err := pods.Exists(bt.conn, "dummyName") Expect(err).To(BeNil()) Expect(response).To(BeFalse()) // Should exit with no error and response should be true - response, err = pods.Exists(connText, "newpod") + response, err = pods.Exists(bt.conn, "newpod") Expect(err).To(BeNil()) Expect(response).To(BeTrue()) }) @@ -103,23 +102,24 @@ var _ = Describe("Podman pods", func() { // This test validates if All running containers within // each specified pod are paused and unpaused It("pause upause pod", func() { + // TODO fix this + Skip("Pod behavior is jacked right now.") // Pause invalid container - err := pods.Pause(connText, "dummyName") + err := pods.Pause(bt.conn, "dummyName") Expect(err).ToNot(BeNil()) code, _ := bindings.CheckResponseCode(err) Expect(code).To(BeNumerically("==", http.StatusNotFound)) // Adding an alpine container to the existing pod - bt.RunTopContainer(nil, &trueFlag, &newpod) - response, err := pods.Inspect(connText, newpod) + _, err = bt.RunTopContainer(nil, &trueFlag, &newpod) Expect(err).To(BeNil()) // Binding needs to be modified to inspect the pod state. - // Since we dont have a pod state we inspect the states of the containers within the pod. + // Since we don't have a pod state we inspect the states of the containers within the pod. // Pause a valid container - err = pods.Pause(connText, newpod) + err = pods.Pause(bt.conn, newpod) Expect(err).To(BeNil()) - response, err = pods.Inspect(connText, newpod) + response, err := pods.Inspect(bt.conn, newpod) Expect(response.State.Status).To(Equal(define.PodStatePaused)) for _, i := range response.Containers { Expect(define.StringToContainerStatus(i.State)). @@ -127,9 +127,9 @@ var _ = Describe("Podman pods", func() { } // Unpause a valid container - err = pods.Unpause(connText, newpod) + err = pods.Unpause(bt.conn, newpod) Expect(err).To(BeNil()) - response, err = pods.Inspect(connText, newpod) + response, err = pods.Inspect(bt.conn, newpod) Expect(response.State.Status).To(Equal(define.PodStateRunning)) for _, i := range response.Containers { Expect(define.StringToContainerStatus(i.State)). 
@@ -139,28 +139,28 @@ var _ = Describe("Podman pods", func() { It("start stop restart pod", func() { // Start an invalid pod - err = pods.Start(connText, "dummyName") + err = pods.Start(bt.conn, "dummyName") Expect(err).ToNot(BeNil()) code, _ := bindings.CheckResponseCode(err) Expect(code).To(BeNumerically("==", http.StatusNotFound)) // Stop an invalid pod - err = pods.Stop(connText, "dummyName", nil) + err = pods.Stop(bt.conn, "dummyName", nil) Expect(err).ToNot(BeNil()) code, _ = bindings.CheckResponseCode(err) Expect(code).To(BeNumerically("==", http.StatusNotFound)) // Restart an invalid pod - err = pods.Restart(connText, "dummyName") + err = pods.Restart(bt.conn, "dummyName") Expect(err).ToNot(BeNil()) code, _ = bindings.CheckResponseCode(err) Expect(code).To(BeNumerically("==", http.StatusNotFound)) // Start a valid pod and inspect status of each container - err = pods.Start(connText, newpod) + err = pods.Start(bt.conn, newpod) Expect(err).To(BeNil()) - response, err := pods.Inspect(connText, newpod) + response, err := pods.Inspect(bt.conn, newpod) Expect(response.State.Status).To(Equal(define.PodStateRunning)) for _, i := range response.Containers { Expect(define.StringToContainerStatus(i.State)). @@ -168,13 +168,13 @@ var _ = Describe("Podman pods", func() { } // Start an already running pod - err = pods.Start(connText, newpod) + err = pods.Start(bt.conn, newpod) Expect(err).To(BeNil()) // Stop the running pods - err = pods.Stop(connText, newpod, nil) + err = pods.Stop(bt.conn, newpod, nil) Expect(err).To(BeNil()) - response, _ = pods.Inspect(connText, newpod) + response, _ = pods.Inspect(bt.conn, newpod) Expect(response.State.Status).To(Equal(define.PodStateExited)) for _, i := range response.Containers { Expect(define.StringToContainerStatus(i.State)). @@ -182,12 +182,12 @@ var _ = Describe("Podman pods", func() { } // Stop an already stopped pod - err = pods.Stop(connText, newpod, nil) + err = pods.Stop(bt.conn, newpod, nil) Expect(err).To(BeNil()) - err = pods.Restart(connText, newpod) + err = pods.Restart(bt.conn, newpod) Expect(err).To(BeNil()) - response, _ = pods.Inspect(connText, newpod) + response, _ = pods.Inspect(bt.conn, newpod) Expect(response.State.Status).To(Equal(define.PodStateRunning)) for _, i := range response.Containers { Expect(define.StringToContainerStatus(i.State)). @@ -195,58 +195,58 @@ var _ = Describe("Podman pods", func() { } }) - // Test to validate all the pods in the stopped/exited state are pruned sucessfully. + // Test to validate all the pods in the stopped/exited state are pruned successfully. It("prune pod", func() { // Add a new pod var newpod2 string = "newpod2" bt.Podcreate(&newpod2) // No pods pruned since no pod in exited state - err = pods.Prune(connText) + err = pods.Prune(bt.conn) Expect(err).To(BeNil()) - podSummary, err := pods.List(connText, nil) + podSummary, err := pods.List(bt.conn, nil) Expect(err).To(BeNil()) Expect(len(podSummary)).To(Equal(2)) // Prune only one pod which is in exited state. // Start then stop a pod. // pod moves to exited state one pod should be pruned now. 
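The start/stop/restart hunk above exercises the pod bindings against the pod state constants from libpod/define. A condensed sketch of that lifecycle check follows; conn is a context.Context obtained from bindings.NewConnection as in the earlier example, the pod is assumed to exist already, and the pods import path is assumed to mirror the containers one:

package example

import (
    "context"
    "fmt"

    "github.com/containers/libpod/libpod/define"
    "github.com/containers/libpod/pkg/bindings/pods"
)

// cyclePod starts and then stops an existing pod, checking the reported
// pod state after each transition.
func cyclePod(conn context.Context, name string) error {
    if err := pods.Start(conn, name); err != nil {
        return err
    }
    report, err := pods.Inspect(conn, name)
    if err != nil {
        return err
    }
    if report.State.Status != define.PodStateRunning {
        return fmt.Errorf("expected %s, got %s", define.PodStateRunning, report.State.Status)
    }
    if err := pods.Stop(conn, name, nil); err != nil {
        return err
    }
    report, err = pods.Inspect(conn, name)
    if err != nil {
        return err
    }
    if report.State.Status != define.PodStateExited {
        return fmt.Errorf("expected %s, got %s", define.PodStateExited, report.State.Status)
    }
    return nil
}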
- err = pods.Start(connText, newpod) + err = pods.Start(bt.conn, newpod) Expect(err).To(BeNil()) - err = pods.Stop(connText, newpod, nil) + err = pods.Stop(bt.conn, newpod, nil) Expect(err).To(BeNil()) - response, err := pods.Inspect(connText, newpod) + response, err := pods.Inspect(bt.conn, newpod) Expect(response.State.Status).To(Equal(define.PodStateExited)) - err = pods.Prune(connText) + err = pods.Prune(bt.conn) Expect(err).To(BeNil()) - podSummary, err = pods.List(connText, nil) + podSummary, err = pods.List(bt.conn, nil) Expect(err).To(BeNil()) Expect(len(podSummary)).To(Equal(1)) // Test prune all pods in exited state. bt.Podcreate(&newpod) - err = pods.Start(connText, newpod) + err = pods.Start(bt.conn, newpod) Expect(err).To(BeNil()) - err = pods.Start(connText, newpod2) + err = pods.Start(bt.conn, newpod2) Expect(err).To(BeNil()) - err = pods.Stop(connText, newpod, nil) + err = pods.Stop(bt.conn, newpod, nil) Expect(err).To(BeNil()) - response, err = pods.Inspect(connText, newpod) + response, err = pods.Inspect(bt.conn, newpod) Expect(response.State.Status).To(Equal(define.PodStateExited)) for _, i := range response.Containers { Expect(define.StringToContainerStatus(i.State)). To(Equal(define.ContainerStateStopped)) } - err = pods.Stop(connText, newpod2, nil) + err = pods.Stop(bt.conn, newpod2, nil) Expect(err).To(BeNil()) - response, err = pods.Inspect(connText, newpod2) + response, err = pods.Inspect(bt.conn, newpod2) Expect(response.State.Status).To(Equal(define.PodStateExited)) for _, i := range response.Containers { Expect(define.StringToContainerStatus(i.State)). To(Equal(define.ContainerStateStopped)) } - err = pods.Prune(connText) + err = pods.Prune(bt.conn) Expect(err).To(BeNil()) - podSummary, err = pods.List(connText, nil) + podSummary, err = pods.List(bt.conn, nil) Expect(err).To(BeNil()) Expect(len(podSummary)).To(Equal(0)) }) diff --git a/pkg/hooks/exec/exec.go b/pkg/hooks/exec/exec.go index 4038e3d94..77b350573 100644 --- a/pkg/hooks/exec/exec.go +++ b/pkg/hooks/exec/exec.go @@ -5,13 +5,13 @@ import ( "bytes" "context" "fmt" - "github.com/sirupsen/logrus" "io" osexec "os/exec" "time" rspec "github.com/opencontainers/runtime-spec/specs-go" "github.com/pkg/errors" + "github.com/sirupsen/logrus" ) // DefaultPostKillTimeout is the recommended default post-kill timeout. 
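The `pkg/hooks/exec` hunk above and the `pkg/lookup` hunk that follows are pure import cleanups: standard-library imports grouped first, third-party packages in their own block, and an explicit alias where the package name does not match the last element of the import path. A small illustration of the resulting style, using `filepath-securejoin` the way `lookup.go` does (the helper function here is invented for the example):

```go
package lookupexample

import (
	"os"

	securejoin "github.com/cyphar/filepath-securejoin"
	"github.com/sirupsen/logrus"
)

// resolveInRoot joins a user-supplied path under root without letting it
// escape root, then checks that the result exists.
func resolveInRoot(root, unsafePath string) (string, error) {
	full, err := securejoin.SecureJoin(root, unsafePath)
	if err != nil {
		logrus.Errorf("joining %q under %q: %v", unsafePath, root, err)
		return "", err
	}
	if _, err := os.Stat(full); err != nil {
		return "", err
	}
	return full, nil
}
```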
diff --git a/pkg/lookup/lookup.go b/pkg/lookup/lookup.go index a249dd753..dff25f74f 100644 --- a/pkg/lookup/lookup.go +++ b/pkg/lookup/lookup.go @@ -4,7 +4,7 @@ import ( "os" "strconv" - "github.com/cyphar/filepath-securejoin" + securejoin "github.com/cyphar/filepath-securejoin" "github.com/opencontainers/runc/libcontainer/user" "github.com/sirupsen/logrus" ) diff --git a/pkg/spec/security.go b/pkg/spec/security.go index ca025eb3e..0f8d36f00 100644 --- a/pkg/spec/security.go +++ b/pkg/spec/security.go @@ -128,10 +128,10 @@ func (c *SecurityConfig) ConfigureGenerator(g *generate.Generator, user *UserCon privCapRequired := []string{} if !c.Privileged && len(c.CapRequired) > 0 { - // Pass CapRequired in CapAdd field to normalize capabilties names + // Pass CapRequired in CapAdd field to normalize capabilities names capRequired, err := capabilities.MergeCapabilities(nil, c.CapRequired, nil) if err != nil { - logrus.Errorf("capabilties requested by user or image are not valid: %q", strings.Join(c.CapRequired, ",")) + logrus.Errorf("capabilities requested by user or image are not valid: %q", strings.Join(c.CapRequired, ",")) } else { // Verify all capRequiered are in the defaultCapList for _, cap := range capRequired { @@ -143,7 +143,7 @@ func (c *SecurityConfig) ConfigureGenerator(g *generate.Generator, user *UserCon if len(privCapRequired) == 0 { defaultCaplist = capRequired } else { - logrus.Errorf("capabilties requested by user or image are not allowed by default: %q", strings.Join(privCapRequired, ",")) + logrus.Errorf("capabilities requested by user or image are not allowed by default: %q", strings.Join(privCapRequired, ",")) } } configSpec.Process.Capabilities.Bounding = defaultCaplist diff --git a/pkg/specgen/create.go b/pkg/specgen/create.go index 34f9ffac2..99a99083b 100644 --- a/pkg/specgen/create.go +++ b/pkg/specgen/create.go @@ -2,17 +2,17 @@ package specgen import ( "context" + "os" + "github.com/containers/libpod/libpod" "github.com/containers/libpod/libpod/config" "github.com/containers/libpod/libpod/define" "github.com/pkg/errors" "github.com/sirupsen/logrus" - "os" ) // MakeContainer creates a container based on the SpecGenerator func (s *SpecGenerator) MakeContainer(rt *libpod.Runtime) (*libpod.Container, error) { - var pod *libpod.Pod if err := s.validate(rt); err != nil { return nil, errors.Wrap(err, "invalid config provided") } @@ -21,7 +21,7 @@ func (s *SpecGenerator) MakeContainer(rt *libpod.Runtime) (*libpod.Container, er return nil, err } - options, err := s.createContainerOptions(rt, pod) + options, err := s.createContainerOptions(rt) if err != nil { return nil, err } @@ -45,7 +45,7 @@ func (s *SpecGenerator) MakeContainer(rt *libpod.Runtime) (*libpod.Container, er return rt.NewContainer(context.Background(), runtimeSpec, options...) 
} -func (s *SpecGenerator) createContainerOptions(rt *libpod.Runtime, pod *libpod.Pod) ([]libpod.CtrCreateOption, error) { +func (s *SpecGenerator) createContainerOptions(rt *libpod.Runtime) ([]libpod.CtrCreateOption, error) { var options []libpod.CtrCreateOption var err error @@ -60,6 +60,10 @@ func (s *SpecGenerator) createContainerOptions(rt *libpod.Runtime, pod *libpod.P options = append(options, libpod.WithName(s.Name)) } if s.Pod != "" { + pod, err := rt.LookupPod(s.Pod) + if err != nil { + return nil, err + } logrus.Debugf("adding container to pod %s", s.Pod) options = append(options, rt.WithPod(pod)) } @@ -115,7 +119,6 @@ func (s *SpecGenerator) createContainerOptions(rt *libpod.Runtime, pod *libpod.P } options = append(options, namespaceOptions...) - // TODO NetworkNS still needs to be done! if len(s.ConmonPidFile) > 0 { options = append(options, libpod.WithConmonPidFile(s.ConmonPidFile)) } diff --git a/pkg/specgen/namespaces.go b/pkg/specgen/namespaces.go index 79a83819a..fa2dee77d 100644 --- a/pkg/specgen/namespaces.go +++ b/pkg/specgen/namespaces.go @@ -70,9 +70,7 @@ func (n *Namespace) IsPrivate() bool { return n.NSMode == Private } -// validate perform simple validation on the namespace to make sure it is not -// invalid from the get-go -func (n *Namespace) validate() error { +func validateNetNS(n *Namespace) error { if n == nil { return nil } @@ -82,6 +80,15 @@ func (n *Namespace) validate() error { default: return errors.Errorf("invalid network %q", n.NSMode) } + return nil +} + +// validate perform simple validation on the namespace to make sure it is not +// invalid from the get-go +func (n *Namespace) validate() error { + if n == nil { + return nil + } // Path and From Container MUST have a string value set if n.NSMode == Path || n.NSMode == FromContainer { if len(n.Value) < 1 { diff --git a/pkg/specgen/validate.go b/pkg/specgen/validate.go index 78e4d8ad5..dd5ca3a55 100644 --- a/pkg/specgen/validate.go +++ b/pkg/specgen/validate.go @@ -138,7 +138,7 @@ func (s *SpecGenerator) validate(rt *libpod.Runtime) error { if err := s.IpcNS.validate(); err != nil { return err } - if err := s.NetNS.validate(); err != nil { + if err := validateNetNS(&s.NetNS); err != nil { return err } if err := s.PidNS.validate(); err != nil { diff --git a/pkg/systemd/generate/systemdgen.go b/pkg/systemd/generate/systemdgen.go index 404347828..00ddc63f3 100644 --- a/pkg/systemd/generate/systemdgen.go +++ b/pkg/systemd/generate/systemdgen.go @@ -80,6 +80,8 @@ const containerTemplate = `# {{.ServiceName}}.service [Unit] Description=Podman {{.ServiceName}}.service Documentation=man:podman-generate-systemd(1) +Wants=network.target +After=network-online.target {{- if .BoundToServices}} RefuseManualStart=yes RefuseManualStop=yes @@ -94,11 +96,11 @@ Before={{- range $index, $value := .RequiredServices -}}{{if $index}} {{end}}{{ [Service] Restart={{.RestartPolicy}} {{- if .New}} -ExecStartPre=/usr/bin/rm -f /%t/%n-pid /%t/%n-cid +ExecStartPre=/usr/bin/rm -f %t/%n-pid %t/%n-cid ExecStart={{.RunCommand}} -ExecStop={{.Executable}} stop --ignore --cidfile /%t/%n-cid {{if (ge .StopTimeout 0)}}-t {{.StopTimeout}}{{end}} -ExecStopPost={{.Executable}} rm --ignore -f --cidfile /%t/%n-cid -PIDFile=/%t/%n-pid +ExecStop={{.Executable}} stop --ignore --cidfile %t/%n-cid {{if (ge .StopTimeout 0)}}-t {{.StopTimeout}}{{end}} +ExecStopPost={{.Executable}} rm --ignore -f --cidfile %t/%n-cid +PIDFile=%t/%n-pid {{- else}} ExecStart={{.Executable}} start {{.ContainerName}} ExecStop={{.Executable}} stop {{if (ge .StopTimeout 
0)}}-t {{.StopTimeout}}{{end}} {{.ContainerName}} @@ -158,8 +160,8 @@ func CreateContainerSystemdUnit(info *ContainerInfo, opts Options) (string, erro command := []string{ info.Executable, "run", - "--conmon-pidfile", "/%t/%n-pid", - "--cidfile", "/%t/%n-cid", + "--conmon-pidfile", "%t/%n-pid", + "--cidfile", "%t/%n-cid", "--cgroups=no-conmon", } command = append(command, info.CreateCommand[index:]...) diff --git a/pkg/systemd/generate/systemdgen_test.go b/pkg/systemd/generate/systemdgen_test.go index b74b75258..145296ea9 100644 --- a/pkg/systemd/generate/systemdgen_test.go +++ b/pkg/systemd/generate/systemdgen_test.go @@ -40,6 +40,8 @@ func TestCreateContainerSystemdUnit(t *testing.T) { [Unit] Description=Podman container-639c53578af4d84b8800b4635fa4e680ee80fd67e0e6a2d4eea48d1e3230f401.service Documentation=man:podman-generate-systemd(1) +Wants=network.target +After=network-online.target [Service] Restart=always @@ -58,6 +60,8 @@ WantedBy=multi-user.target` [Unit] Description=Podman container-foobar.service Documentation=man:podman-generate-systemd(1) +Wants=network.target +After=network-online.target [Service] Restart=always @@ -76,6 +80,8 @@ WantedBy=multi-user.target` [Unit] Description=Podman container-foobar.service Documentation=man:podman-generate-systemd(1) +Wants=network.target +After=network-online.target RefuseManualStart=yes RefuseManualStop=yes BindsTo=a.service b.service c.service pod.service @@ -98,6 +104,8 @@ WantedBy=multi-user.target` [Unit] Description=Podman pod-123abc.service Documentation=man:podman-generate-systemd(1) +Wants=network.target +After=network-online.target Requires=container-1.service container-2.service Before=container-1.service container-2.service @@ -118,14 +126,16 @@ WantedBy=multi-user.target` [Unit] Description=Podman jadda-jadda.service Documentation=man:podman-generate-systemd(1) +Wants=network.target +After=network-online.target [Service] Restart=always -ExecStartPre=/usr/bin/rm -f /%t/%n-pid /%t/%n-cid -ExecStart=/usr/bin/podman run --conmon-pidfile /%t/%n-pid --cidfile /%t/%n-cid --cgroups=no-conmon --name jadda-jadda --hostname hello-world awesome-image:latest command arg1 ... argN -ExecStop=/usr/bin/podman stop --ignore --cidfile /%t/%n-cid -t 42 -ExecStopPost=/usr/bin/podman rm --ignore -f --cidfile /%t/%n-cid -PIDFile=/%t/%n-pid +ExecStartPre=/usr/bin/rm -f %t/%n-pid %t/%n-cid +ExecStart=/usr/bin/podman run --conmon-pidfile %t/%n-pid --cidfile %t/%n-cid --cgroups=no-conmon --name jadda-jadda --hostname hello-world awesome-image:latest command arg1 ... 
argN +ExecStop=/usr/bin/podman stop --ignore --cidfile %t/%n-cid -t 42 +ExecStopPost=/usr/bin/podman rm --ignore -f --cidfile %t/%n-cid +PIDFile=%t/%n-pid KillMode=none Type=forking diff --git a/test/e2e/generate_systemd_test.go b/test/e2e/generate_systemd_test.go index d0dadd09d..31131a68b 100644 --- a/test/e2e/generate_systemd_test.go +++ b/test/e2e/generate_systemd_test.go @@ -191,7 +191,7 @@ var _ = Describe("Podman generate systemd", func() { found, _ := session.GrepString("# container-foo.service") Expect(found).To(BeTrue()) - found, _ = session.GrepString("stop --ignore --cidfile /%t/%n-cid -t 42") + found, _ = session.GrepString("stop --ignore --cidfile %t/%n-cid -t 42") Expect(found).To(BeTrue()) }) diff --git a/vendor/github.com/containernetworking/cni/libcni/api.go b/vendor/github.com/containernetworking/cni/libcni/api.go index 22b111742..7e52bd838 100644 --- a/vendor/github.com/containernetworking/cni/libcni/api.go +++ b/vendor/github.com/containernetworking/cni/libcni/api.go @@ -409,6 +409,9 @@ func (c *CNIConfig) addNetwork(ctx context.Context, name, cniVersion string, net if err := utils.ValidateNetworkName(name); err != nil { return nil, err } + if err := utils.ValidateInterfaceName(rt.IfName); err != nil { + return nil, err + } newConf, err := buildOneConfig(name, cniVersion, net, prevResult, rt) if err != nil { @@ -629,6 +632,9 @@ func (c *CNIConfig) validatePlugin(ctx context.Context, pluginName, expectedVers if err != nil { return err } + if expectedVersion == "" { + expectedVersion = "0.1.0" + } vi, err := invoke.GetVersionInfo(ctx, pluginPath, c.exec) if err != nil { diff --git a/vendor/github.com/containernetworking/cni/pkg/invoke/args.go b/vendor/github.com/containernetworking/cni/pkg/invoke/args.go index d31a44e87..3cdb4bc8d 100644 --- a/vendor/github.com/containernetworking/cni/pkg/invoke/args.go +++ b/vendor/github.com/containernetworking/cni/pkg/invoke/args.go @@ -60,8 +60,8 @@ func (args *Args) AsEnv() []string { pluginArgsStr = stringify(args.PluginArgs) } - // Duplicated values which come first will be overrided, so we must put the - // custom values in the end to avoid being overrided by the process environments. + // Duplicated values which come first will be overridden, so we must put the + // custom values in the end to avoid being overridden by the process environments. 
env = append(env, "CNI_COMMAND="+args.Command, "CNI_CONTAINERID="+args.ContainerID, diff --git a/vendor/github.com/containernetworking/cni/pkg/invoke/raw_exec.go b/vendor/github.com/containernetworking/cni/pkg/invoke/raw_exec.go index ad8498ba2..4f89a5dda 100644 --- a/vendor/github.com/containernetworking/cni/pkg/invoke/raw_exec.go +++ b/vendor/github.com/containernetworking/cni/pkg/invoke/raw_exec.go @@ -44,10 +44,14 @@ func (e *RawExec) ExecPlugin(ctx context.Context, pluginPath string, stdinData [ } func pluginErr(err error, output []byte) error { - if _, ok := err.(*exec.ExitError); ok { + if exitError, ok := err.(*exec.ExitError); ok { emsg := types.Error{} if len(output) == 0 { - emsg.Msg = "netplugin failed with no error message" + if len(exitError.Stderr) == 0 { + emsg.Msg = "netplugin failed with no error message" + } else { + emsg.Msg = fmt.Sprintf("netplugin failed: %q", string(exitError.Stderr)) + } } else if perr := json.Unmarshal(output, &emsg); perr != nil { emsg.Msg = fmt.Sprintf("netplugin failed but error parsing its diagnostic message %q: %v", string(output), perr) } diff --git a/vendor/github.com/containernetworking/cni/pkg/types/020/types.go b/vendor/github.com/containernetworking/cni/pkg/types/020/types.go index 53256167f..36f31678a 100644 --- a/vendor/github.com/containernetworking/cni/pkg/types/020/types.go +++ b/vendor/github.com/containernetworking/cni/pkg/types/020/types.go @@ -86,20 +86,6 @@ func (r *Result) PrintTo(writer io.Writer) error { return err } -// String returns a formatted string in the form of "[IP4: $1,][ IP6: $2,] DNS: $3" where -// $1 represents the receiver's IPv4, $2 represents the receiver's IPv6 and $3 the -// receiver's DNS. If $1 or $2 are nil, they won't be present in the returned string. -func (r *Result) String() string { - var str string - if r.IP4 != nil { - str = fmt.Sprintf("IP4:%+v, ", *r.IP4) - } - if r.IP6 != nil { - str += fmt.Sprintf("IP6:%+v, ", *r.IP6) - } - return fmt.Sprintf("%sDNS:%+v", str, r.DNS) -} - // IPConfig contains values necessary to configure an interface type IPConfig struct { IP net.IPNet diff --git a/vendor/github.com/containernetworking/cni/pkg/types/current/types.go b/vendor/github.com/containernetworking/cni/pkg/types/current/types.go index 7267a2e6d..754cc6e72 100644 --- a/vendor/github.com/containernetworking/cni/pkg/types/current/types.go +++ b/vendor/github.com/containernetworking/cni/pkg/types/current/types.go @@ -207,23 +207,6 @@ func (r *Result) PrintTo(writer io.Writer) error { return err } -// String returns a formatted string in the form of "[Interfaces: $1,][ IP: $2,] DNS: $3" where -// $1 represents the receiver's Interfaces, $2 represents the receiver's IP addresses and $3 the -// receiver's DNS. If $1 or $2 are nil, they won't be present in the returned string. 
-func (r *Result) String() string { - var str string - if len(r.Interfaces) > 0 { - str += fmt.Sprintf("Interfaces:%+v, ", r.Interfaces) - } - if len(r.IPs) > 0 { - str += fmt.Sprintf("IP:%+v, ", r.IPs) - } - if len(r.Routes) > 0 { - str += fmt.Sprintf("Routes:%+v, ", r.Routes) - } - return fmt.Sprintf("%sDNS:%+v", str, r.DNS) -} - // Convert this old version result to the current CNI version result func (r *Result) Convert() (*Result, error) { return r, nil diff --git a/vendor/github.com/containernetworking/cni/pkg/types/types.go b/vendor/github.com/containernetworking/cni/pkg/types/types.go index 3e185c1ce..3fa757a5d 100644 --- a/vendor/github.com/containernetworking/cni/pkg/types/types.go +++ b/vendor/github.com/containernetworking/cni/pkg/types/types.go @@ -100,9 +100,6 @@ type Result interface { // Prints the result in JSON format to provided writer PrintTo(writer io.Writer) error - - // Returns a JSON string representation of the result - String() string } func PrintResult(result Result, version string) error { diff --git a/vendor/github.com/containernetworking/cni/pkg/utils/utils.go b/vendor/github.com/containernetworking/cni/pkg/utils/utils.go index 324c40dea..b8ec38874 100644 --- a/vendor/github.com/containernetworking/cni/pkg/utils/utils.go +++ b/vendor/github.com/containernetworking/cni/pkg/utils/utils.go @@ -15,14 +15,22 @@ package utils import ( + "bytes" + "fmt" "regexp" + "unicode" "github.com/containernetworking/cni/pkg/types" ) -// cniValidNameChars is the regexp used to validate valid characters in -// containerID and networkName -const cniValidNameChars = `[a-zA-Z0-9][a-zA-Z0-9_.\-]` +const ( + // cniValidNameChars is the regexp used to validate valid characters in + // containerID and networkName + cniValidNameChars = `[a-zA-Z0-9][a-zA-Z0-9_.\-]` + + // maxInterfaceNameLength is the length max of a valid interface name + maxInterfaceNameLength = 15 +) var cniReg = regexp.MustCompile(`^` + cniValidNameChars + `*$`) @@ -49,3 +57,28 @@ func ValidateNetworkName(networkName string) *types.Error { } return nil } + +// ValidateInterfaceName will validate the interface name based on the three rules below +// 1. The name must not be empty +// 2. The name must be less than 16 characters +// 3. The name must not be "." or ".." +// 3. The name must not contain / or : or any whitespace characters +// ref to https://github.com/torvalds/linux/blob/master/net/core/dev.c#L1024 +func ValidateInterfaceName(ifName string) *types.Error { + if len(ifName) == 0 { + return types.NewError(types.ErrInvalidEnvironmentVariables, "interface name is empty", "") + } + if len(ifName) > maxInterfaceNameLength { + return types.NewError(types.ErrInvalidEnvironmentVariables, "interface name is too long", fmt.Sprintf("interface name should be less than %d characters", maxInterfaceNameLength+1)) + } + if ifName == "." || ifName == ".." { + return types.NewError(types.ErrInvalidEnvironmentVariables, "interface name is . 
or ..", "") + } + for _, r := range bytes.Runes([]byte(ifName)) { + if r == '/' || r == ':' || unicode.IsSpace(r) { + return types.NewError(types.ErrInvalidEnvironmentVariables, "interface name contains / or : or whitespace characters", "") + } + } + + return nil +} diff --git a/vendor/github.com/containers/buildah/CHANGELOG.md b/vendor/github.com/containers/buildah/CHANGELOG.md index cb9e6f7c4..3bf97a522 100644 --- a/vendor/github.com/containers/buildah/CHANGELOG.md +++ b/vendor/github.com/containers/buildah/CHANGELOG.md @@ -2,6 +2,15 @@ # Changelog +## v1.14.2 (2020-03-03) + Add Buildah pull request template + Bump to containers/storage v1.16.1 + run_linux: fix tight loop if file is not pollable + Bump github.com/opencontainers/selinux from 1.3.2 to 1.3.3 + Bump github.com/containers/common from 0.4.1 to 0.4.2 + Bump back to v1.15.0-dev + Add Containerfile to build a versioned stable image on quay.io + ## v1.14.1 (2020-02-27) Search for local runtime per values in containers.conf Set correct ownership on working directory diff --git a/vendor/github.com/containers/buildah/buildah.go b/vendor/github.com/containers/buildah/buildah.go index 9a947c602..6d1d479b1 100644 --- a/vendor/github.com/containers/buildah/buildah.go +++ b/vendor/github.com/containers/buildah/buildah.go @@ -27,7 +27,7 @@ const ( Package = "buildah" // Version for the Package. Bump version in contrib/rpm/buildah.spec // too. - Version = "1.14.1" + Version = "1.14.2" // The value we use to identify what type of information, currently a // serialized Builder structure, we are using as per-container state. // This should only be changed when we make incompatible changes to diff --git a/vendor/github.com/containers/buildah/changelog.txt b/vendor/github.com/containers/buildah/changelog.txt index a435377b1..b4c71bf6a 100644 --- a/vendor/github.com/containers/buildah/changelog.txt +++ b/vendor/github.com/containers/buildah/changelog.txt @@ -1,3 +1,12 @@ +- Changelog for v1.14.2 (2020-03-03) + * Add Buildah pull request template + * Bump to containers/storage v1.16.1 + * run_linux: fix tight loop if file is not pollable + * Bump github.com/opencontainers/selinux from 1.3.2 to 1.3.3 + * Bump github.com/containers/common from 0.4.1 to 0.4.2 + * Bump back to v1.15.0-dev + * Add Containerfile to build a versioned stable image on quay.io + - Changelog for v1.14.1 (2020-02-27) * Search for local runtime per values in containers.conf * Set correct ownership on working directory diff --git a/vendor/github.com/containers/buildah/go.mod b/vendor/github.com/containers/buildah/go.mod index 0172da01b..72fbffe2c 100644 --- a/vendor/github.com/containers/buildah/go.mod +++ b/vendor/github.com/containers/buildah/go.mod @@ -4,9 +4,9 @@ go 1.12 require ( github.com/containernetworking/cni v0.7.2-0.20190904153231-83439463f784 - github.com/containers/common v0.4.1 + github.com/containers/common v0.4.2 github.com/containers/image/v5 v5.2.1 - github.com/containers/storage v1.16.0 + github.com/containers/storage v1.16.1 github.com/cyphar/filepath-securejoin v0.2.2 github.com/docker/distribution v2.7.1+incompatible github.com/docker/go-metrics v0.0.1 // indirect @@ -25,7 +25,7 @@ require ( github.com/opencontainers/runc v1.0.0-rc9 github.com/opencontainers/runtime-spec v0.1.2-0.20190618234442-a950415649c7 github.com/opencontainers/runtime-tools v0.9.0 - github.com/opencontainers/selinux v1.3.2 + github.com/opencontainers/selinux v1.3.3 github.com/openshift/api v0.0.0-20200106203948-7ab22a2c8316 github.com/openshift/imagebuilder v1.1.1 
github.com/pkg/errors v0.9.1 diff --git a/vendor/github.com/containers/buildah/go.sum b/vendor/github.com/containers/buildah/go.sum index 60c040165..79dc064ce 100644 --- a/vendor/github.com/containers/buildah/go.sum +++ b/vendor/github.com/containers/buildah/go.sum @@ -101,6 +101,8 @@ github.com/containers/common v0.4.0 h1:LpX2J19cZKSpn4PBtbLX/tTk3JzTtaqRWbaEoX5YG github.com/containers/common v0.4.0/go.mod h1:AiPCv0ZcBOVshnup/X6MuaqkySZQZ3iBWfInjJFIl40= github.com/containers/common v0.4.1 h1:Uu7f2ZDM/5xsqOkZwIEVKSjUI3YxKjvNIY5x57kjaKo= github.com/containers/common v0.4.1/go.mod h1:m62kenckrWi5rZx32kaLje2Og0hpf6NsaTBn6+b+Oys= +github.com/containers/common v0.4.2 h1:O5d1gj/xdpQdZi0MEivRQ/7AeRaVeHdbSP/bvShw458= +github.com/containers/common v0.4.2/go.mod h1:m62kenckrWi5rZx32kaLje2Og0hpf6NsaTBn6+b+Oys= github.com/containers/conmon v2.0.10+incompatible/go.mod h1:hgwZ2mtuDrppv78a/cOBNiCm6O0UMWGx1mu7P00nu5I= github.com/containers/image/v4 v4.0.1 h1:idNGHChj0Pyv3vLrxul2oSVMZLeFqpoq3CjLeVgapSQ= github.com/containers/image/v4 v4.0.1/go.mod h1:0ASJH1YgJiX/eqFZObqepgsvIA4XjCgpyfwn9pDGafA= @@ -145,6 +147,8 @@ github.com/containers/storage v1.15.8 h1:ef7OfUMTpyq0PIVAhV7qfufEI92gAldk25nItri github.com/containers/storage v1.15.8/go.mod h1:zhvjIIl/fR6wt/lgqQAC+xanHQ+8gUQ0GBVeXYN81qI= github.com/containers/storage v1.16.0 h1:sD+s7BmiNBh61CuHN3j8PXGCwMtV9zPVJETAlshIf3w= github.com/containers/storage v1.16.0/go.mod h1:nqN09JSi1/RSI1UAUwDYXPRiGSlq5FPbNkN/xb0TfG0= +github.com/containers/storage v1.16.1 h1:gVLVqbqaoyopLJbcQ9PQdsnm8SzVy6Vw24fofwMgkE0= +github.com/containers/storage v1.16.1/go.mod h1:toFp72SLn/iyJ6YbrnrZ0bW63aH2Qw3dA8JVwL4ADPo= github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= @@ -363,6 +367,8 @@ github.com/klauspost/compress v1.9.8 h1:VMAMUUOh+gaxKTMk+zqbjsSjsIcUcL/LF4o63i82 github.com/klauspost/compress v1.9.8/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= github.com/klauspost/compress v1.10.0 h1:92XGj1AcYzA6UrVdd4qIIBrT8OroryvRvdmg/IfmC7Y= github.com/klauspost/compress v1.10.0/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= +github.com/klauspost/compress v1.10.2 h1:Znfn6hXZAHaLPNnlqUYRrBSReFHYybslgv4PTiyz6P0= +github.com/klauspost/compress v1.10.2/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= github.com/klauspost/cpuid v1.2.1 h1:vJi+O/nMdFt0vqm8NZBI6wzALWdA2X+egi0ogNyrC/w= github.com/klauspost/cpuid v1.2.1/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= github.com/klauspost/pgzip v1.2.1 h1:oIPZROsWuPHpOdMVWLuJZXwgjhrW8r1yEX8UqMyeNHM= @@ -487,6 +493,8 @@ github.com/opencontainers/selinux v1.3.1 h1:dn2Rc3wTEvTB6iVqoFrKKeMb0uZ38ZheeyMu github.com/opencontainers/selinux v1.3.1/go.mod h1:yTcKuYAh6R95iDpefGLQaPaRwJFwyzAJufJyiTt7s0g= github.com/opencontainers/selinux v1.3.2 h1:DR4lL9SYVjgcTZKEZIncvDU06fKSc/eygjmNGOA3E1s= github.com/opencontainers/selinux v1.3.2/go.mod h1:yTcKuYAh6R95iDpefGLQaPaRwJFwyzAJufJyiTt7s0g= +github.com/opencontainers/selinux v1.3.3 h1:RX0wAeqtvVSYQcr017X3pFXPkLEtB6V4NjRD7gVQgg4= +github.com/opencontainers/selinux v1.3.3/go.mod h1:yTcKuYAh6R95iDpefGLQaPaRwJFwyzAJufJyiTt7s0g= github.com/openshift/api v0.0.0-20200106203948-7ab22a2c8316 h1:enQG2QUGwug4fR1yM6hL0Fjzx6Km/exZY6RbSPwMu3o= github.com/openshift/api v0.0.0-20200106203948-7ab22a2c8316/go.mod h1:dv+J0b/HWai0QnMVb37/H0v36klkLBi2TNpPeWDxX10= 
github.com/openshift/api v3.9.1-0.20190810003144-27fb16909b15+incompatible h1:s55wx8JIG/CKnewev892HifTBrtKzMdvgB3rm4rxC2s= diff --git a/vendor/github.com/containers/buildah/run_linux.go b/vendor/github.com/containers/buildah/run_linux.go index d2c0abf9b..6df6ef41a 100644 --- a/vendor/github.com/containers/buildah/run_linux.go +++ b/vendor/github.com/containers/buildah/run_linux.go @@ -1203,6 +1203,13 @@ func runCopyStdio(stdio *sync.WaitGroup, copyPipes bool, stdioPipe [][]int, copy runCopyStdioPassData(copyPipes, stdioPipe, finishCopy, relayMap, relayBuffer, readDesc, writeDesc) } +func canRetry(err error) bool { + if errno, isErrno := err.(syscall.Errno); isErrno { + return errno == syscall.EINTR || errno == syscall.EAGAIN + } + return false +} + func runCopyStdioPassData(copyPipes bool, stdioPipe [][]int, finishCopy []int, relayMap map[int]int, relayBuffer map[int]*bytes.Buffer, readDesc map[int]string, writeDesc map[int]string) { closeStdin := false @@ -1250,7 +1257,7 @@ func runCopyStdioPassData(copyPipes bool, stdioPipe [][]int, finishCopy []int, r // If it's zero-length on our stdin and we're // using pipes, it's an EOF, so close the stdin // pipe's writing end. - if n == 0 && copyPipes && int(pollFd.Fd) == unix.Stdin { + if n == 0 && !canRetry(err) && int(pollFd.Fd) == unix.Stdin { removes[int(pollFd.Fd)] = struct{}{} } else if n > 0 { // Buffer the data in case we get blocked on where they need to go. diff --git a/vendor/github.com/containers/storage/.cirrus.yml b/vendor/github.com/containers/storage/.cirrus.yml index e4b38947d..3463adf90 100644 --- a/vendor/github.com/containers/storage/.cirrus.yml +++ b/vendor/github.com/containers/storage/.cirrus.yml @@ -19,7 +19,7 @@ env: #### # GCE project where images live IMAGE_PROJECT: "libpod-218412" - _BUILT_IMAGE_SUFFIX: "libpod-6228273469587456" + _BUILT_IMAGE_SUFFIX: "libpod-5874660151656448" FEDORA_CACHE_IMAGE_NAME: "fedora-31-${_BUILT_IMAGE_SUFFIX}" PRIOR_FEDORA_CACHE_IMAGE_NAME: "fedora-30-${_BUILT_IMAGE_SUFFIX}" UBUNTU_CACHE_IMAGE_NAME: "ubuntu-19-${_BUILT_IMAGE_SUFFIX}" @@ -50,31 +50,50 @@ gce_instance: disk: 200 image_name: "${FEDORA_CACHE_IMAGE_NAME}" + testing_task: + depends_on: - lint + + # Not all $TEST_DRIVER combinations are valid for all OS types. + # Note: Nested-variable resolution happens at runtime, not eval. time. + # Use verbose logic for ease of reading/maintaining. + only_if: >- + ( $VM_IMAGE =~ '.*UBUNTU.*' && $TEST_DRIVER == "vfs" ) || + ( $VM_IMAGE =~ '.*UBUNTU.*' && $TEST_DRIVER == "aufs" ) || + ( $VM_IMAGE =~ '.*UBUNTU.*' && $TEST_DRIVER == "overlay" ) || + ( $VM_IMAGE =~ '.*UBUNTU.*' && $TEST_DRIVER == "fuse-overlay" ) || + ( $VM_IMAGE =~ '.*FEDORA.*' && $TEST_DRIVER != "aufs" ) + + allow_failures: $TEST_DRIVER == "devicemapper" + + env: + matrix: + VM_IMAGE: "${FEDORA_CACHE_IMAGE_NAME}" + VM_IMAGE: "${PRIOR_FEDORA_CACHE_IMAGE_NAME}" + VM_IMAGE: "${UBUNTU_CACHE_IMAGE_NAME}" + # VM_IMAGE: "${PRIOR_UBUNTU_CACHE_IMAGE_NAME}" # No fuse3 support + matrix: # See ./contrib/cirrus/build_and_test.sh + TEST_DRIVER: "vfs" + TEST_DRIVER: "aufs" + TEST_DRIVER: "overlay" + TEST_DRIVER: "fuse-overlay" + TEST_DRIVER: "devicemapper" + TEST_DRIVER: "fuse-overlay-whiteout" + gce_instance: # Only need to specify differences from defaults (above) - matrix: # Duplicate this task for each matrix product. 
- image_name: "${FEDORA_CACHE_IMAGE_NAME}" - image_name: "${PRIOR_FEDORA_CACHE_IMAGE_NAME}" - image_name: "${UBUNTU_CACHE_IMAGE_NAME}" - # image_name: "${PRIOR_UBUNTU_CACHE_IMAGE_NAME}" # No fuse3 support + image_name: "${VM_IMAGE}" # Separate scripts for separate outputs, makes debugging easier. setup_script: '${CIRRUS_WORKING_DIR}/${SCRIPT_BASE}/setup.sh |& ${_TIMESTAMP}' build_and_test_script: '${CIRRUS_WORKING_DIR}/${SCRIPT_BASE}/build_and_test.sh |& ${_TIMESTAMP}' - # Log collection when job was successful - df_script: '${_DFCMD} || true' - rh_audit_log_script: '${_RAUDITCMD} || true' - ubuntu_audit_log_script: '${_UAUDITCMD} || true' - journal_log_script: '${_JOURNALCMD} || true' - - on_failure: # Script names must be different from above - failure_df_script: '${_DFCMD} || true' - failure_rh_audit_log_script: '${_RAUDITCMD} || true' - failure_ubuntu_audit_log_script: '${_UAUDITCMD} || true' - failure_journal_log_script: '${_JOURNALCMD} || true' + always: + df_script: '${_DFCMD} || true' + rh_audit_log_script: '${_RAUDITCMD} || true' + ubuntu_audit_log_script: '${_UAUDITCMD} || true' + journal_log_script: '${_JOURNALCMD} || true' lint_task: env: @@ -94,7 +113,7 @@ lint_task: meta_task: container: - image: "quay.io/libpod/imgts:latest" # see contrib/imgts + image: "quay.io/libpod/imgts:master" cpu: 1 memory: 1 diff --git a/vendor/github.com/containers/storage/.travis.yml b/vendor/github.com/containers/storage/.travis.yml deleted file mode 100644 index a7865729d..000000000 --- a/vendor/github.com/containers/storage/.travis.yml +++ /dev/null @@ -1,62 +0,0 @@ ---- - -sudo: required - -# N/B: host go env. not actually used, see .run_ci_tests.sh -language: go -go: - - master - -services: - - docker - -env: - # Ubuntu - - GO_VERSION="stable" - DISTRO="ubuntu" - - - GO_VERSION="1.12.12" - DISTRO="ubuntu" - - # Fedora - - GO_VERSION="stable" - DISTRO="fedora" - - - GO_VERSION="1.12.12" - DISTRO="fedora" - - # CentOS - - GO_VERSION="stable" - DISTRO="centos" - - - GO_VERSION="1.12.12" - DISTRO="centos" - -# GO_VERSION="stable" builds successfully, but tests fail on all platforms. 
-# Run the tests, but ignore the result (for now) -matrix: - allow_failures: - - env: GO_VERSION="stable" DISTRO="ubuntu" - - env: GO_VERSION="stable" DISTRO="fedora" - - env: GO_VERSION="stable" DISTRO="centos" - -before_install: - - sudo apt-get -qq update - - sudo apt-get -qq install realpath - -script: - - echo "Travis/host environment:" - - export TRAVIS_ENV="-e TRAVIS=$TRAVIS - -e CI=$CI - -e TRAVIS_COMMIT=$TRAVIS_COMMIT - -e TRAVIS_COMMIT_RANGE=$TRAVIS_COMMIT_RANGE - -e TRAVIS_REPO_SLUG=$TRAVIS_REPO_SLUG - -e TRAVIS_PULL_REQUEST=$TRAVIS_PULL_REQUEST - -e TRAVIS_PULL_REQUEST_SHA=$TRAVIS_PULL_REQUEST_SHA - -e TRAVIS_PULL_REQUEST_SLUG=$TRAVIS_PULL_REQUEST_SLUG - -e TRAVIS_BRANCH=$TRAVIS_BRANCH - -e TRAVIS_JOB_ID=$TRAVIS_JOB_ID - -e TRAVIS_BUILD_DIR=$TRAVIS_BUILD_DIR" - - env - - echo "Running tests in SPC using ./hack/run_ci_tests.sh" - - ./hack/run_ci_tests.sh diff --git a/vendor/github.com/containers/storage/VERSION b/vendor/github.com/containers/storage/VERSION index 15b989e39..41c11ffb7 100644 --- a/vendor/github.com/containers/storage/VERSION +++ b/vendor/github.com/containers/storage/VERSION @@ -1 +1 @@ -1.16.0 +1.16.1 diff --git a/vendor/github.com/containers/storage/go.mod b/vendor/github.com/containers/storage/go.mod index 84dd86a20..073bb7d2b 100644 --- a/vendor/github.com/containers/storage/go.mod +++ b/vendor/github.com/containers/storage/go.mod @@ -7,19 +7,19 @@ require ( github.com/Microsoft/hcsshim v0.8.7 github.com/docker/docker v0.0.0-20171019062838-86f080cff091 // indirect github.com/docker/go-units v0.4.0 - github.com/klauspost/compress v1.10.0 + github.com/klauspost/compress v1.10.2 github.com/klauspost/cpuid v1.2.1 // indirect github.com/klauspost/pgzip v1.2.1 github.com/mattn/go-shellwords v1.0.10 github.com/mistifyio/go-zfs v2.1.1+incompatible github.com/opencontainers/go-digest v1.0.0-rc1 github.com/opencontainers/runc v1.0.0-rc9 - github.com/opencontainers/selinux v1.3.1 + github.com/opencontainers/selinux v1.3.3 github.com/pkg/errors v0.9.1 github.com/pquerna/ffjson v0.0.0-20181028064349-e517b90714f7 github.com/sirupsen/logrus v1.4.2 github.com/spf13/pflag v1.0.3 // indirect - github.com/stretchr/testify v1.4.0 + github.com/stretchr/testify v1.5.1 github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2 github.com/tchap/go-patricia v2.3.0+incompatible github.com/vbatts/tar-split v0.11.1 diff --git a/vendor/github.com/containers/storage/go.sum b/vendor/github.com/containers/storage/go.sum index c2029949a..7fd19b00b 100644 --- a/vendor/github.com/containers/storage/go.sum +++ b/vendor/github.com/containers/storage/go.sum @@ -81,6 +81,10 @@ github.com/klauspost/compress v1.9.8 h1:VMAMUUOh+gaxKTMk+zqbjsSjsIcUcL/LF4o63i82 github.com/klauspost/compress v1.9.8/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= github.com/klauspost/compress v1.10.0 h1:92XGj1AcYzA6UrVdd4qIIBrT8OroryvRvdmg/IfmC7Y= github.com/klauspost/compress v1.10.0/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= +github.com/klauspost/compress v1.10.1 h1:a/QY0o9S6wCi0XhxaMX/QmusicNUqCqFugR6WKPOSoQ= +github.com/klauspost/compress v1.10.1/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= +github.com/klauspost/compress v1.10.2 h1:Znfn6hXZAHaLPNnlqUYRrBSReFHYybslgv4PTiyz6P0= +github.com/klauspost/compress v1.10.2/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= github.com/klauspost/cpuid v1.2.1 h1:vJi+O/nMdFt0vqm8NZBI6wzALWdA2X+egi0ogNyrC/w= github.com/klauspost/cpuid v1.2.1/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= github.com/klauspost/pgzip v1.2.1 
h1:oIPZROsWuPHpOdMVWLuJZXwgjhrW8r1yEX8UqMyeNHM= @@ -121,6 +125,10 @@ github.com/opencontainers/selinux v1.3.0 h1:xsI95WzPZu5exzA6JzkLSfdr/DilzOhCJOqG github.com/opencontainers/selinux v1.3.0/go.mod h1:+BLncwf63G4dgOzykXAxcmnFlUaOlkDdmw/CqsW6pjs= github.com/opencontainers/selinux v1.3.1 h1:dn2Rc3wTEvTB6iVqoFrKKeMb0uZ38ZheeyMu2h5C1TI= github.com/opencontainers/selinux v1.3.1/go.mod h1:yTcKuYAh6R95iDpefGLQaPaRwJFwyzAJufJyiTt7s0g= +github.com/opencontainers/selinux v1.3.2 h1:DR4lL9SYVjgcTZKEZIncvDU06fKSc/eygjmNGOA3E1s= +github.com/opencontainers/selinux v1.3.2/go.mod h1:yTcKuYAh6R95iDpefGLQaPaRwJFwyzAJufJyiTt7s0g= +github.com/opencontainers/selinux v1.3.3 h1:RX0wAeqtvVSYQcr017X3pFXPkLEtB6V4NjRD7gVQgg4= +github.com/opencontainers/selinux v1.3.3/go.mod h1:yTcKuYAh6R95iDpefGLQaPaRwJFwyzAJufJyiTt7s0g= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= @@ -151,6 +159,10 @@ github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0 github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.5.0 h1:DMOzIV76tmoDNE9pX6RSN0aDtCYeCg5VueieJaAo1uw= +github.com/stretchr/testify v1.5.0/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/stretchr/testify v1.5.1 h1:nOGnQDM7FYENwehXlg/kFVnos3rEvtKTjRvOWSzb6H4= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2 h1:b6uOv7YOFK0TYG7HtkIgExQo+2RdLuwRft63jn2HWj8= github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= diff --git a/vendor/github.com/containers/storage/storage.conf b/vendor/github.com/containers/storage/storage.conf index b7b73ed38..895b479de 100644 --- a/vendor/github.com/containers/storage/storage.conf +++ b/vendor/github.com/containers/storage/storage.conf @@ -13,6 +13,10 @@ runroot = "/var/run/containers/storage" # Primary Read/Write location of container storage graphroot = "/var/lib/containers/storage" +# Storage path for rootless users +# +# rootless_storage_path = "$HOME/.local/share/containers/storage" + [storage.options] # Storage options to be passed to underlying storage drivers @@ -107,7 +111,7 @@ mountopt = "nodev" # Value 0% disables # min_free_space = "10%" -# mkfsarg specifies extra mkfs arguments to be used when creating the base. +# mkfsarg specifies extra mkfs arguments to be used when creating the base # device. # mkfsarg = "" @@ -115,7 +119,7 @@ mountopt = "nodev" # size = "" # use_deferred_removal marks devicemapper block device for deferred removal. -# If the thinpool is in use when the driver attempts to remove it, the driver +# If the thinpool is in use when the driver attempts to remove it, the driver # tells the kernel to remove it as soon as possible. Note this does not free # up the disk space, use deferred deletion to fully remove the thinpool. 
# use_deferred_removal = "True" diff --git a/vendor/github.com/containers/storage/store.go b/vendor/github.com/containers/storage/store.go index d978c476d..2c32e1504 100644 --- a/vendor/github.com/containers/storage/store.go +++ b/vendor/github.com/containers/storage/store.go @@ -139,6 +139,9 @@ type StoreOptions struct { // GraphRoot is the filesystem path under which we will store the // contents of layers, images, and containers. GraphRoot string `json:"root,omitempty"` + // RootlessStoragePath is the storage path for rootless users + // default $HOME/.local/share/containers/storage + RootlessStoragePath string `toml:"rootless_storage_path"` // GraphDriverName is the underlying storage driver that we'll be // using. It only needs to be specified the first time a Store is // initialized for a given RunRoot and GraphRoot. @@ -3288,10 +3291,11 @@ func DefaultConfigFile(rootless bool) (string, error) { // TOML-friendly explicit tables used for conversions. type tomlConfig struct { Storage struct { - Driver string `toml:"driver"` - RunRoot string `toml:"runroot"` - GraphRoot string `toml:"graphroot"` - Options cfg.OptionsConfig `toml:"options"` + Driver string `toml:"driver"` + RunRoot string `toml:"runroot"` + GraphRoot string `toml:"graphroot"` + RootlessStoragePath string `toml:"rootless_storage_path"` + Options cfg.OptionsConfig `toml:"options"` } `toml:"storage"` } @@ -3312,6 +3316,9 @@ func ReloadConfigurationFile(configFile string, storeOptions *StoreOptions) { fmt.Printf("Failed to parse %s %v\n", configFile, err.Error()) return } + if os.Getenv("STORAGE_DRIVER") != "" { + config.Storage.Driver = os.Getenv("STORAGE_DRIVER") + } if config.Storage.Driver != "" { storeOptions.GraphDriverName = config.Storage.Driver } @@ -3321,6 +3328,9 @@ func ReloadConfigurationFile(configFile string, storeOptions *StoreOptions) { if config.Storage.GraphRoot != "" { storeOptions.GraphRoot = config.Storage.GraphRoot } + if config.Storage.RootlessStoragePath != "" { + storeOptions.RootlessStoragePath = config.Storage.RootlessStoragePath + } for _, s := range config.Storage.Options.AdditionalImageStores { storeOptions.GraphDriverOptions = append(storeOptions.GraphDriverOptions, fmt.Sprintf("%s.imagestore=%s", config.Storage.Driver, s)) } @@ -3364,11 +3374,8 @@ func ReloadConfigurationFile(configFile string, storeOptions *StoreOptions) { } else { storeOptions.GIDMap = append(storeOptions.GIDMap, gidmap...) } - if os.Getenv("STORAGE_DRIVER") != "" { - storeOptions.GraphDriverName = os.Getenv("STORAGE_DRIVER") - } - storeOptions.GraphDriverOptions = cfg.GetGraphDriverOptions(storeOptions.GraphDriverName, config.Storage.Options) + storeOptions.GraphDriverOptions = append(storeOptions.GraphDriverOptions, cfg.GetGraphDriverOptions(storeOptions.GraphDriverName, config.Storage.Options)...) if os.Getenv("STORAGE_OPTS") != "" { storeOptions.GraphDriverOptions = append(storeOptions.GraphDriverOptions, strings.Split(os.Getenv("STORAGE_OPTS"), ",")...) 
diff --git a/vendor/github.com/containers/storage/utils.go b/vendor/github.com/containers/storage/utils.go index 7e4b27d0f..f1e94fd2b 100644 --- a/vendor/github.com/containers/storage/utils.go +++ b/vendor/github.com/containers/storage/utils.go @@ -4,7 +4,9 @@ import ( "fmt" "os" "os/exec" + "os/user" "path/filepath" + "regexp" "strconv" "strings" @@ -146,6 +148,7 @@ func getRootlessStorageOpts(rootlessUID int) (StoreOptions, error) { } opts.RunRoot = rootlessRuntime opts.GraphRoot = filepath.Join(dataDir, "containers", "storage") + opts.RootlessStoragePath = opts.GraphRoot if path, err := exec.LookPath("fuse-overlayfs"); err == nil { opts.GraphDriverName = "overlay" opts.GraphDriverOptions = []string{fmt.Sprintf("overlay.mount_program=%s", path)} @@ -161,6 +164,7 @@ func getTomlStorage(storeOptions *StoreOptions) *tomlConfig { config.Storage.Driver = storeOptions.GraphDriverName config.Storage.RunRoot = storeOptions.RunRoot config.Storage.GraphRoot = storeOptions.GraphRoot + config.Storage.RootlessStoragePath = storeOptions.RootlessStoragePath for _, i := range storeOptions.GraphDriverOptions { s := strings.Split(i, "=") if s[0] == "overlay.mount_program" { @@ -227,6 +231,19 @@ func DefaultStoreOptions(rootless bool, rootlessUID int) (StoreOptions, error) { if storageOpts.GraphRoot == "" { storageOpts.GraphRoot = defaultRootlessGraphRoot } + if storageOpts.RootlessStoragePath != "" { + if err = validRootlessStoragePathFormat(storageOpts.RootlessStoragePath); err != nil { + return storageOpts, err + } + rootlessStoragePath := strings.Replace(storageOpts.RootlessStoragePath, "$HOME", homedir.Get(), -1) + rootlessStoragePath = strings.Replace(rootlessStoragePath, "$UID", strconv.Itoa(rootlessUID), -1) + usr, err := user.LookupId(strconv.Itoa(rootlessUID)) + if err != nil { + return storageOpts, err + } + rootlessStoragePath = strings.Replace(rootlessStoragePath, "$USER", usr.Username, -1) + storageOpts.GraphRoot = rootlessStoragePath + } } else { if err := os.MkdirAll(filepath.Dir(storageConf), 0755); err != nil { return storageOpts, errors.Wrapf(err, "cannot make directory %s", filepath.Dir(storageConf)) @@ -248,3 +265,21 @@ func DefaultStoreOptions(rootless bool, rootlessUID int) (StoreOptions, error) { } return storageOpts, nil } + +// validRootlessStoragePathFormat checks if the environments contained in the path are accepted +func validRootlessStoragePathFormat(path string) error { + if !strings.Contains(path, "$") { + return nil + } + + splitPaths := strings.SplitAfter(path, "$") + validEnv := regexp.MustCompile(`^(HOME|USER|UID)([^a-zA-Z]|$)`).MatchString + if len(splitPaths) > 1 { + for _, p := range splitPaths[1:] { + if !validEnv(p) { + return errors.Errorf("Unrecognized environment variable") + } + } + } + return nil +} diff --git a/vendor/github.com/klauspost/compress/flate/deflate.go b/vendor/github.com/klauspost/compress/flate/deflate.go index d9948ab40..2b101d26b 100644 --- a/vendor/github.com/klauspost/compress/flate/deflate.go +++ b/vendor/github.com/klauspost/compress/flate/deflate.go @@ -48,6 +48,8 @@ const ( maxHashOffset = 1 << 24 skipNever = math.MaxInt32 + + debugDeflate = false ) type compressionLevel struct { @@ -59,15 +61,13 @@ type compressionLevel struct { // See https://blog.klauspost.com/rebalancing-deflate-compression-levels/ var levels = []compressionLevel{ {}, // 0 - // Level 1-4 uses specialized algorithm - values not used + // Level 1-6 uses specialized algorithm - values not used {0, 0, 0, 0, 0, 1}, {0, 0, 0, 0, 0, 2}, {0, 0, 0, 0, 0, 3}, {0, 0, 0, 0, 0, 
4}, - // For levels 5-6 we don't bother trying with lazy matches. - // Lazy matching is at least 30% slower, with 1.5% increase. - {6, 0, 12, 8, 12, 5}, - {8, 0, 24, 16, 16, 6}, + {0, 0, 0, 0, 0, 5}, + {0, 0, 0, 0, 0, 6}, // Levels 7-9 use increasingly more lazy matching // and increasingly stringent conditions for "good enough". {8, 8, 24, 16, skipNever, 7}, @@ -203,9 +203,8 @@ func (d *compressor) writeBlockSkip(tok *tokens, index int, eof bool) error { // This is much faster than doing a full encode. // Should only be used after a start/reset. func (d *compressor) fillWindow(b []byte) { - // Do not fill window if we are in store-only mode, - // use constant or Snappy compression. - if d.level == 0 { + // Do not fill window if we are in store-only or huffman mode. + if d.level <= 0 { return } if d.fast != nil { @@ -368,7 +367,7 @@ func (d *compressor) deflateLazy() { // Sanity enables additional runtime tests. // It's intended to be used during development // to supplement the currently ad-hoc unit tests. - const sanity = false + const sanity = debugDeflate if d.windowEnd-s.index < minMatchLength+maxMatchLength && !d.sync { return @@ -667,6 +666,7 @@ func (d *compressor) init(w io.Writer, level int) (err error) { default: return fmt.Errorf("flate: invalid compression level %d: want value in range [-2, 9]", level) } + d.level = level return nil } @@ -720,6 +720,7 @@ func (d *compressor) close() error { return d.w.err } d.w.flush() + d.w.reset(nil) return d.w.err } @@ -750,8 +751,7 @@ func NewWriter(w io.Writer, level int) (*Writer, error) { // can only be decompressed by a Reader initialized with the // same dictionary. func NewWriterDict(w io.Writer, level int, dict []byte) (*Writer, error) { - dw := &dictWriter{w} - zw, err := NewWriter(dw, level) + zw, err := NewWriter(w, level) if err != nil { return nil, err } @@ -760,14 +760,6 @@ func NewWriterDict(w io.Writer, level int, dict []byte) (*Writer, error) { return zw, err } -type dictWriter struct { - w io.Writer -} - -func (w *dictWriter) Write(b []byte) (n int, err error) { - return w.w.Write(b) -} - // A Writer takes data written to it and writes the compressed // form of that data to an underlying writer (see NewWriter). type Writer struct { @@ -805,11 +797,12 @@ func (w *Writer) Close() error { // the result of NewWriter or NewWriterDict called with dst // and w's level and dictionary. func (w *Writer) Reset(dst io.Writer) { - if dw, ok := w.d.w.writer.(*dictWriter); ok { + if len(w.dict) > 0 { // w was created with NewWriterDict - dw.w = dst - w.d.reset(dw) - w.d.fillWindow(w.dict) + w.d.reset(dst) + if dst != nil { + w.d.fillWindow(w.dict) + } } else { // w was created with NewWriter w.d.reset(dst) diff --git a/vendor/github.com/klauspost/compress/flate/fast_encoder.go b/vendor/github.com/klauspost/compress/flate/fast_encoder.go index 3d2fdcd77..6d4c1e98b 100644 --- a/vendor/github.com/klauspost/compress/flate/fast_encoder.go +++ b/vendor/github.com/klauspost/compress/flate/fast_encoder.go @@ -35,16 +35,16 @@ func newFastEnc(level int) fastEnc { } const ( - tableBits = 16 // Bits used in the table + tableBits = 15 // Bits used in the table tableSize = 1 << tableBits // Size of the table tableShift = 32 - tableBits // Right-shift to get the tableBits most significant bits of a uint32. 
baseMatchOffset = 1 // The smallest match offset baseMatchLength = 3 // The smallest match length per the RFC section 3.2.5 maxMatchOffset = 1 << 15 // The largest match offset - bTableBits = 18 // Bits used in the big tables + bTableBits = 17 // Bits used in the big tables bTableSize = 1 << bTableBits // Size of the table - allocHistory = maxStoreBlockSize * 20 // Size to preallocate for history. + allocHistory = maxStoreBlockSize * 10 // Size to preallocate for history. bufferReset = (1 << 31) - allocHistory - maxStoreBlockSize - 1 // Reset the buffer offset when reaching this. ) @@ -92,7 +92,6 @@ func hash(u uint32) uint32 { } type tableEntry struct { - val uint32 offset int32 } diff --git a/vendor/github.com/klauspost/compress/flate/gen_inflate.go b/vendor/github.com/klauspost/compress/flate/gen_inflate.go new file mode 100644 index 000000000..c74a95fe7 --- /dev/null +++ b/vendor/github.com/klauspost/compress/flate/gen_inflate.go @@ -0,0 +1,274 @@ +// +build generate + +//go:generate go run $GOFILE && gofmt -w inflate_gen.go + +package main + +import ( + "os" + "strings" +) + +func main() { + f, err := os.Create("inflate_gen.go") + if err != nil { + panic(err) + } + defer f.Close() + types := []string{"*bytes.Buffer", "*bytes.Reader", "*bufio.Reader", "*strings.Reader"} + names := []string{"BytesBuffer", "BytesReader", "BufioReader", "StringsReader"} + imports := []string{"bytes", "bufio", "io", "strings", "math/bits"} + f.WriteString(`// Code generated by go generate gen_inflate.go. DO NOT EDIT. + +package flate + +import ( +`) + + for _, imp := range imports { + f.WriteString("\t\"" + imp + "\"\n") + } + f.WriteString(")\n\n") + + template := ` + +// Decode a single Huffman block from f. +// hl and hd are the Huffman states for the lit/length values +// and the distance values, respectively. If hd == nil, using the +// fixed distance encoding associated with fixed Huffman blocks. +func (f *decompressor) $FUNCNAME$() { + const ( + stateInit = iota // Zero value must be stateInit + stateDict + ) + fr := f.r.($TYPE$) + moreBits := func() error { + c, err := fr.ReadByte() + if err != nil { + return noEOF(err) + } + f.roffset++ + f.b |= uint32(c) << f.nb + f.nb += 8 + return nil + } + + switch f.stepState { + case stateInit: + goto readLiteral + case stateDict: + goto copyHistory + } + +readLiteral: + // Read literal and/or (length, distance) according to RFC section 3.2.3. + { + var v int + { + // Inlined v, err := f.huffSym(f.hl) + // Since a huffmanDecoder can be empty or be composed of a degenerate tree + // with single element, huffSym must error on these two edge cases. In both + // cases, the chunks slice will be 0 for the invalid sequence, leading it + // satisfy the n == 0 check below. + n := uint(f.hl.maxRead) + // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers, + // but is smart enough to keep local variables in registers, so use nb and b, + // inline call to moreBits and reassign b,nb back to f on return. 
+ nb, b := f.nb, f.b + for { + for nb < n { + c, err := fr.ReadByte() + if err != nil { + f.b = b + f.nb = nb + f.err = noEOF(err) + return + } + f.roffset++ + b |= uint32(c) << (nb & 31) + nb += 8 + } + chunk := f.hl.chunks[b&(huffmanNumChunks-1)] + n = uint(chunk & huffmanCountMask) + if n > huffmanChunkBits { + chunk = f.hl.links[chunk>>huffmanValueShift][(b>>huffmanChunkBits)&f.hl.linkMask] + n = uint(chunk & huffmanCountMask) + } + if n <= nb { + if n == 0 { + f.b = b + f.nb = nb + if debugDecode { + fmt.Println("huffsym: n==0") + } + f.err = CorruptInputError(f.roffset) + return + } + f.b = b >> (n & 31) + f.nb = nb - n + v = int(chunk >> huffmanValueShift) + break + } + } + } + + var n uint // number of bits extra + var length int + var err error + switch { + case v < 256: + f.dict.writeByte(byte(v)) + if f.dict.availWrite() == 0 { + f.toRead = f.dict.readFlush() + f.step = (*decompressor).$FUNCNAME$ + f.stepState = stateInit + return + } + goto readLiteral + case v == 256: + f.finishBlock() + return + // otherwise, reference to older data + case v < 265: + length = v - (257 - 3) + n = 0 + case v < 269: + length = v*2 - (265*2 - 11) + n = 1 + case v < 273: + length = v*4 - (269*4 - 19) + n = 2 + case v < 277: + length = v*8 - (273*8 - 35) + n = 3 + case v < 281: + length = v*16 - (277*16 - 67) + n = 4 + case v < 285: + length = v*32 - (281*32 - 131) + n = 5 + case v < maxNumLit: + length = 258 + n = 0 + default: + if debugDecode { + fmt.Println(v, ">= maxNumLit") + } + f.err = CorruptInputError(f.roffset) + return + } + if n > 0 { + for f.nb < n { + if err = moreBits(); err != nil { + if debugDecode { + fmt.Println("morebits n>0:", err) + } + f.err = err + return + } + } + length += int(f.b & uint32(1<<n-1)) + f.b >>= n + f.nb -= n + } + + var dist int + if f.hd == nil { + for f.nb < 5 { + if err = moreBits(); err != nil { + if debugDecode { + fmt.Println("morebits f.nb<5:", err) + } + f.err = err + return + } + } + dist = int(bits.Reverse8(uint8(f.b & 0x1F << 3))) + f.b >>= 5 + f.nb -= 5 + } else { + if dist, err = f.huffSym(f.hd); err != nil { + if debugDecode { + fmt.Println("huffsym:", err) + } + f.err = err + return + } + } + + switch { + case dist < 4: + dist++ + case dist < maxNumDist: + nb := uint(dist-2) >> 1 + // have 1 bit in bottom of dist, need nb more. + extra := (dist & 1) << nb + for f.nb < nb { + if err = moreBits(); err != nil { + if debugDecode { + fmt.Println("morebits f.nb<nb:", err) + } + f.err = err + return + } + } + extra |= int(f.b & uint32(1<<nb-1)) + f.b >>= nb + f.nb -= nb + dist = 1<<(nb+1) + 1 + extra + default: + if debugDecode { + fmt.Println("dist too big:", dist, maxNumDist) + } + f.err = CorruptInputError(f.roffset) + return + } + + // No check on length; encoding can be prescient. + if dist > f.dict.histSize() { + if debugDecode { + fmt.Println("dist > f.dict.histSize():", dist, f.dict.histSize()) + } + f.err = CorruptInputError(f.roffset) + return + } + + f.copyLen, f.copyDist = length, dist + goto copyHistory + } + +copyHistory: + // Perform a backwards copy according to RFC section 3.2.3. 
+ { + cnt := f.dict.tryWriteCopy(f.copyDist, f.copyLen) + if cnt == 0 { + cnt = f.dict.writeCopy(f.copyDist, f.copyLen) + } + f.copyLen -= cnt + + if f.dict.availWrite() == 0 || f.copyLen > 0 { + f.toRead = f.dict.readFlush() + f.step = (*decompressor).$FUNCNAME$ // We need to continue this work + f.stepState = stateDict + return + } + goto readLiteral + } +} + +` + for i, t := range types { + s := strings.Replace(template, "$FUNCNAME$", "huffman"+names[i], -1) + s = strings.Replace(s, "$TYPE$", t, -1) + f.WriteString(s) + } + f.WriteString("func (f *decompressor) huffmanBlockDecoder() func() {\n") + f.WriteString("\tswitch f.r.(type) {\n") + for i, t := range types { + f.WriteString("\t\tcase " + t + ":\n") + f.WriteString("\t\t\treturn f.huffman" + names[i] + "\n") + } + f.WriteString("\t\tdefault:\n") + f.WriteString("\t\t\treturn f.huffmanBlockGeneric") + f.WriteString("\t}\n}\n") +} diff --git a/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go b/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go index 56ee6dc8b..53fe1d06e 100644 --- a/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go +++ b/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go @@ -484,6 +484,9 @@ func (w *huffmanBitWriter) writeDynamicHeader(numLiterals int, numOffsets int, n } } +// writeStoredHeader will write a stored header. +// If the stored block is only used for EOF, +// it is replaced with a fixed huffman block. func (w *huffmanBitWriter) writeStoredHeader(length int, isEof bool) { if w.err != nil { return @@ -493,6 +496,16 @@ func (w *huffmanBitWriter) writeStoredHeader(length int, isEof bool) { w.writeCode(w.literalEncoding.codes[endBlockMarker]) w.lastHeader = 0 } + + // To write EOF, use a fixed encoding block. 10 bits instead of 5 bytes. 
+ if length == 0 && isEof { + w.writeFixedHeader(isEof) + // EOB: 7 bits, value: 0 + w.writeBits(0, 7) + w.flush() + return + } + var flag int32 if isEof { flag = 1 diff --git a/vendor/github.com/klauspost/compress/flate/huffman_code.go b/vendor/github.com/klauspost/compress/flate/huffman_code.go index 9d8e81ad6..4c39a3018 100644 --- a/vendor/github.com/klauspost/compress/flate/huffman_code.go +++ b/vendor/github.com/klauspost/compress/flate/huffman_code.go @@ -109,8 +109,8 @@ func generateFixedOffsetEncoding() *huffmanEncoder { return h } -var fixedLiteralEncoding *huffmanEncoder = generateFixedLiteralEncoding() -var fixedOffsetEncoding *huffmanEncoder = generateFixedOffsetEncoding() +var fixedLiteralEncoding = generateFixedLiteralEncoding() +var fixedOffsetEncoding = generateFixedOffsetEncoding() func (h *huffmanEncoder) bitLength(freq []uint16) int { var total int diff --git a/vendor/github.com/klauspost/compress/flate/inflate.go b/vendor/github.com/klauspost/compress/flate/inflate.go index 6dc5b5d06..7f175a4ec 100644 --- a/vendor/github.com/klauspost/compress/flate/inflate.go +++ b/vendor/github.com/klauspost/compress/flate/inflate.go @@ -106,7 +106,7 @@ const ( ) type huffmanDecoder struct { - min int // the minimum code length + maxRead int // the maximum number of bits we can read and not overread chunks *[huffmanNumChunks]uint16 // chunks as described above links [][]uint16 // overflow links linkMask uint32 // mask the width of the link table @@ -126,12 +126,12 @@ func (h *huffmanDecoder) init(lengths []int) bool { if h.chunks == nil { h.chunks = &[huffmanNumChunks]uint16{} } - if h.min != 0 { + if h.maxRead != 0 { *h = huffmanDecoder{chunks: h.chunks, links: h.links} } // Count number of codes of each length, - // compute min and max length. + // compute maxRead and max length. var count [maxCodeLen]int var min, max int for _, n := range lengths { @@ -178,7 +178,7 @@ func (h *huffmanDecoder) init(lengths []int) bool { return false } - h.min = min + h.maxRead = min chunks := h.chunks[:] for i := range chunks { chunks[i] = 0 @@ -342,7 +342,7 @@ func (f *decompressor) nextBlock() { // compressed, fixed Huffman tables f.hl = &fixedHuffmanDecoder f.hd = nil - f.huffmanBlock() + f.huffmanBlockDecoder()() case 2: // compressed, dynamic Huffman tables if f.err = f.readHuffman(); f.err != nil { @@ -350,7 +350,7 @@ func (f *decompressor) nextBlock() { } f.hl = &f.h1 f.hd = &f.h2 - f.huffmanBlock() + f.huffmanBlockDecoder()() default: // 3 is reserved. if debugDecode { @@ -543,12 +543,18 @@ func (f *decompressor) readHuffman() error { return CorruptInputError(f.roffset) } - // As an optimization, we can initialize the min bits to read at a time + // As an optimization, we can initialize the maxRead bits to read at a time // for the HLIT tree to the length of the EOB marker since we know that // every block must terminate with one. This preserves the property that // we never read any extra bytes after the end of the DEFLATE stream. - if f.h1.min < f.bits[endBlockMarker] { - f.h1.min = f.bits[endBlockMarker] + if f.h1.maxRead < f.bits[endBlockMarker] { + f.h1.maxRead = f.bits[endBlockMarker] + } + if !f.final { + // If not the final block, the smallest block possible is + // a predefined table, BTYPE=01, with a single EOB marker. + // This will take up 3 + 7 bits. + f.h1.maxRead += 10 } return nil @@ -558,7 +564,7 @@ func (f *decompressor) readHuffman() error { // hl and hd are the Huffman states for the lit/length values // and the distance values, respectively. 
If hd == nil, using the // fixed distance encoding associated with fixed Huffman blocks. -func (f *decompressor) huffmanBlock() { +func (f *decompressor) huffmanBlockGeneric() { const ( stateInit = iota // Zero value must be stateInit stateDict @@ -574,19 +580,64 @@ func (f *decompressor) huffmanBlock() { readLiteral: // Read literal and/or (length, distance) according to RFC section 3.2.3. { - v, err := f.huffSym(f.hl) - if err != nil { - f.err = err - return + var v int + { + // Inlined v, err := f.huffSym(f.hl) + // Since a huffmanDecoder can be empty or be composed of a degenerate tree + // with single element, huffSym must error on these two edge cases. In both + // cases, the chunks slice will be 0 for the invalid sequence, leading it + // satisfy the n == 0 check below. + n := uint(f.hl.maxRead) + // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers, + // but is smart enough to keep local variables in registers, so use nb and b, + // inline call to moreBits and reassign b,nb back to f on return. + nb, b := f.nb, f.b + for { + for nb < n { + c, err := f.r.ReadByte() + if err != nil { + f.b = b + f.nb = nb + f.err = noEOF(err) + return + } + f.roffset++ + b |= uint32(c) << (nb & 31) + nb += 8 + } + chunk := f.hl.chunks[b&(huffmanNumChunks-1)] + n = uint(chunk & huffmanCountMask) + if n > huffmanChunkBits { + chunk = f.hl.links[chunk>>huffmanValueShift][(b>>huffmanChunkBits)&f.hl.linkMask] + n = uint(chunk & huffmanCountMask) + } + if n <= nb { + if n == 0 { + f.b = b + f.nb = nb + if debugDecode { + fmt.Println("huffsym: n==0") + } + f.err = CorruptInputError(f.roffset) + return + } + f.b = b >> (n & 31) + f.nb = nb - n + v = int(chunk >> huffmanValueShift) + break + } + } } + var n uint // number of bits extra var length int + var err error switch { case v < 256: f.dict.writeByte(byte(v)) if f.dict.availWrite() == 0 { f.toRead = f.dict.readFlush() - f.step = (*decompressor).huffmanBlock + f.step = (*decompressor).huffmanBlockGeneric f.stepState = stateInit return } @@ -714,7 +765,7 @@ copyHistory: if f.dict.availWrite() == 0 || f.copyLen > 0 { f.toRead = f.dict.readFlush() - f.step = (*decompressor).huffmanBlock // We need to continue this work + f.step = (*decompressor).huffmanBlockGeneric // We need to continue this work f.stepState = stateDict return } @@ -726,21 +777,33 @@ copyHistory: func (f *decompressor) dataBlock() { // Uncompressed. // Discard current half-byte. - f.nb = 0 - f.b = 0 + left := (f.nb) & 7 + f.nb -= left + f.b >>= left + + offBytes := f.nb >> 3 + // Unfilled values will be overwritten. + f.buf[0] = uint8(f.b) + f.buf[1] = uint8(f.b >> 8) + f.buf[2] = uint8(f.b >> 16) + f.buf[3] = uint8(f.b >> 24) + + f.roffset += int64(offBytes) + f.nb, f.b = 0, 0 // Length then ones-complement of length. 
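	// Any whole bytes still held in the bit buffer were copied into f.buf above,
	// so only the remaining bytes of the 4-byte LEN/NLEN header are read from the
	// underlying reader below.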
- nr, err := io.ReadFull(f.r, f.buf[0:4]) + nr, err := io.ReadFull(f.r, f.buf[offBytes:4]) f.roffset += int64(nr) if err != nil { f.err = noEOF(err) return } - n := int(f.buf[0]) | int(f.buf[1])<<8 - nn := int(f.buf[2]) | int(f.buf[3])<<8 - if uint16(nn) != uint16(^n) { + n := uint16(f.buf[0]) | uint16(f.buf[1])<<8 + nn := uint16(f.buf[2]) | uint16(f.buf[3])<<8 + if nn != ^n { if debugDecode { - fmt.Println("uint16(nn) != uint16(^n)", nn, ^n) + ncomp := ^n + fmt.Println("uint16(nn) != uint16(^n)", nn, ncomp) } f.err = CorruptInputError(f.roffset) return @@ -752,7 +815,7 @@ func (f *decompressor) dataBlock() { return } - f.copyLen = n + f.copyLen = int(n) f.copyData() } @@ -816,7 +879,7 @@ func (f *decompressor) huffSym(h *huffmanDecoder) (int, error) { // with single element, huffSym must error on these two edge cases. In both // cases, the chunks slice will be 0 for the invalid sequence, leading it // satisfy the n == 0 check below. - n := uint(h.min) + n := uint(h.maxRead) // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers, // but is smart enough to keep local variables in registers, so use nb and b, // inline call to moreBits and reassign b,nb back to f on return. diff --git a/vendor/github.com/klauspost/compress/flate/inflate_gen.go b/vendor/github.com/klauspost/compress/flate/inflate_gen.go new file mode 100644 index 000000000..397dc1b1a --- /dev/null +++ b/vendor/github.com/klauspost/compress/flate/inflate_gen.go @@ -0,0 +1,922 @@ +// Code generated by go generate gen_inflate.go. DO NOT EDIT. + +package flate + +import ( + "bufio" + "bytes" + "fmt" + "math/bits" + "strings" +) + +// Decode a single Huffman block from f. +// hl and hd are the Huffman states for the lit/length values +// and the distance values, respectively. If hd == nil, using the +// fixed distance encoding associated with fixed Huffman blocks. +func (f *decompressor) huffmanBytesBuffer() { + const ( + stateInit = iota // Zero value must be stateInit + stateDict + ) + fr := f.r.(*bytes.Buffer) + moreBits := func() error { + c, err := fr.ReadByte() + if err != nil { + return noEOF(err) + } + f.roffset++ + f.b |= uint32(c) << f.nb + f.nb += 8 + return nil + } + + switch f.stepState { + case stateInit: + goto readLiteral + case stateDict: + goto copyHistory + } + +readLiteral: + // Read literal and/or (length, distance) according to RFC section 3.2.3. + { + var v int + { + // Inlined v, err := f.huffSym(f.hl) + // Since a huffmanDecoder can be empty or be composed of a degenerate tree + // with single element, huffSym must error on these two edge cases. In both + // cases, the chunks slice will be 0 for the invalid sequence, leading it + // satisfy the n == 0 check below. + n := uint(f.hl.maxRead) + // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers, + // but is smart enough to keep local variables in registers, so use nb and b, + // inline call to moreBits and reassign b,nb back to f on return. 
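	// This generated file provides one copy of the block decoder per concrete
	// reader type (bytes.Buffer, bytes.Reader, bufio.Reader, strings.Reader), so
	// fr.ReadByte is a direct method call in the hot loop rather than an interface
	// call; huffmanBlockDecoder picks the matching variant and falls back to
	// huffmanBlockGeneric for any other reader.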
+ nb, b := f.nb, f.b + for { + for nb < n { + c, err := fr.ReadByte() + if err != nil { + f.b = b + f.nb = nb + f.err = noEOF(err) + return + } + f.roffset++ + b |= uint32(c) << (nb & 31) + nb += 8 + } + chunk := f.hl.chunks[b&(huffmanNumChunks-1)] + n = uint(chunk & huffmanCountMask) + if n > huffmanChunkBits { + chunk = f.hl.links[chunk>>huffmanValueShift][(b>>huffmanChunkBits)&f.hl.linkMask] + n = uint(chunk & huffmanCountMask) + } + if n <= nb { + if n == 0 { + f.b = b + f.nb = nb + if debugDecode { + fmt.Println("huffsym: n==0") + } + f.err = CorruptInputError(f.roffset) + return + } + f.b = b >> (n & 31) + f.nb = nb - n + v = int(chunk >> huffmanValueShift) + break + } + } + } + + var n uint // number of bits extra + var length int + var err error + switch { + case v < 256: + f.dict.writeByte(byte(v)) + if f.dict.availWrite() == 0 { + f.toRead = f.dict.readFlush() + f.step = (*decompressor).huffmanBytesBuffer + f.stepState = stateInit + return + } + goto readLiteral + case v == 256: + f.finishBlock() + return + // otherwise, reference to older data + case v < 265: + length = v - (257 - 3) + n = 0 + case v < 269: + length = v*2 - (265*2 - 11) + n = 1 + case v < 273: + length = v*4 - (269*4 - 19) + n = 2 + case v < 277: + length = v*8 - (273*8 - 35) + n = 3 + case v < 281: + length = v*16 - (277*16 - 67) + n = 4 + case v < 285: + length = v*32 - (281*32 - 131) + n = 5 + case v < maxNumLit: + length = 258 + n = 0 + default: + if debugDecode { + fmt.Println(v, ">= maxNumLit") + } + f.err = CorruptInputError(f.roffset) + return + } + if n > 0 { + for f.nb < n { + if err = moreBits(); err != nil { + if debugDecode { + fmt.Println("morebits n>0:", err) + } + f.err = err + return + } + } + length += int(f.b & uint32(1<<n-1)) + f.b >>= n + f.nb -= n + } + + var dist int + if f.hd == nil { + for f.nb < 5 { + if err = moreBits(); err != nil { + if debugDecode { + fmt.Println("morebits f.nb<5:", err) + } + f.err = err + return + } + } + dist = int(bits.Reverse8(uint8(f.b & 0x1F << 3))) + f.b >>= 5 + f.nb -= 5 + } else { + if dist, err = f.huffSym(f.hd); err != nil { + if debugDecode { + fmt.Println("huffsym:", err) + } + f.err = err + return + } + } + + switch { + case dist < 4: + dist++ + case dist < maxNumDist: + nb := uint(dist-2) >> 1 + // have 1 bit in bottom of dist, need nb more. + extra := (dist & 1) << nb + for f.nb < nb { + if err = moreBits(); err != nil { + if debugDecode { + fmt.Println("morebits f.nb<nb:", err) + } + f.err = err + return + } + } + extra |= int(f.b & uint32(1<<nb-1)) + f.b >>= nb + f.nb -= nb + dist = 1<<(nb+1) + 1 + extra + default: + if debugDecode { + fmt.Println("dist too big:", dist, maxNumDist) + } + f.err = CorruptInputError(f.roffset) + return + } + + // No check on length; encoding can be prescient. + if dist > f.dict.histSize() { + if debugDecode { + fmt.Println("dist > f.dict.histSize():", dist, f.dict.histSize()) + } + f.err = CorruptInputError(f.roffset) + return + } + + f.copyLen, f.copyDist = length, dist + goto copyHistory + } + +copyHistory: + // Perform a backwards copy according to RFC section 3.2.3. + { + cnt := f.dict.tryWriteCopy(f.copyDist, f.copyLen) + if cnt == 0 { + cnt = f.dict.writeCopy(f.copyDist, f.copyLen) + } + f.copyLen -= cnt + + if f.dict.availWrite() == 0 || f.copyLen > 0 { + f.toRead = f.dict.readFlush() + f.step = (*decompressor).huffmanBytesBuffer // We need to continue this work + f.stepState = stateDict + return + } + goto readLiteral + } +} + +// Decode a single Huffman block from f. 
+// hl and hd are the Huffman states for the lit/length values +// and the distance values, respectively. If hd == nil, using the +// fixed distance encoding associated with fixed Huffman blocks. +func (f *decompressor) huffmanBytesReader() { + const ( + stateInit = iota // Zero value must be stateInit + stateDict + ) + fr := f.r.(*bytes.Reader) + moreBits := func() error { + c, err := fr.ReadByte() + if err != nil { + return noEOF(err) + } + f.roffset++ + f.b |= uint32(c) << f.nb + f.nb += 8 + return nil + } + + switch f.stepState { + case stateInit: + goto readLiteral + case stateDict: + goto copyHistory + } + +readLiteral: + // Read literal and/or (length, distance) according to RFC section 3.2.3. + { + var v int + { + // Inlined v, err := f.huffSym(f.hl) + // Since a huffmanDecoder can be empty or be composed of a degenerate tree + // with single element, huffSym must error on these two edge cases. In both + // cases, the chunks slice will be 0 for the invalid sequence, leading it + // satisfy the n == 0 check below. + n := uint(f.hl.maxRead) + // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers, + // but is smart enough to keep local variables in registers, so use nb and b, + // inline call to moreBits and reassign b,nb back to f on return. + nb, b := f.nb, f.b + for { + for nb < n { + c, err := fr.ReadByte() + if err != nil { + f.b = b + f.nb = nb + f.err = noEOF(err) + return + } + f.roffset++ + b |= uint32(c) << (nb & 31) + nb += 8 + } + chunk := f.hl.chunks[b&(huffmanNumChunks-1)] + n = uint(chunk & huffmanCountMask) + if n > huffmanChunkBits { + chunk = f.hl.links[chunk>>huffmanValueShift][(b>>huffmanChunkBits)&f.hl.linkMask] + n = uint(chunk & huffmanCountMask) + } + if n <= nb { + if n == 0 { + f.b = b + f.nb = nb + if debugDecode { + fmt.Println("huffsym: n==0") + } + f.err = CorruptInputError(f.roffset) + return + } + f.b = b >> (n & 31) + f.nb = nb - n + v = int(chunk >> huffmanValueShift) + break + } + } + } + + var n uint // number of bits extra + var length int + var err error + switch { + case v < 256: + f.dict.writeByte(byte(v)) + if f.dict.availWrite() == 0 { + f.toRead = f.dict.readFlush() + f.step = (*decompressor).huffmanBytesReader + f.stepState = stateInit + return + } + goto readLiteral + case v == 256: + f.finishBlock() + return + // otherwise, reference to older data + case v < 265: + length = v - (257 - 3) + n = 0 + case v < 269: + length = v*2 - (265*2 - 11) + n = 1 + case v < 273: + length = v*4 - (269*4 - 19) + n = 2 + case v < 277: + length = v*8 - (273*8 - 35) + n = 3 + case v < 281: + length = v*16 - (277*16 - 67) + n = 4 + case v < 285: + length = v*32 - (281*32 - 131) + n = 5 + case v < maxNumLit: + length = 258 + n = 0 + default: + if debugDecode { + fmt.Println(v, ">= maxNumLit") + } + f.err = CorruptInputError(f.roffset) + return + } + if n > 0 { + for f.nb < n { + if err = moreBits(); err != nil { + if debugDecode { + fmt.Println("morebits n>0:", err) + } + f.err = err + return + } + } + length += int(f.b & uint32(1<<n-1)) + f.b >>= n + f.nb -= n + } + + var dist int + if f.hd == nil { + for f.nb < 5 { + if err = moreBits(); err != nil { + if debugDecode { + fmt.Println("morebits f.nb<5:", err) + } + f.err = err + return + } + } + dist = int(bits.Reverse8(uint8(f.b & 0x1F << 3))) + f.b >>= 5 + f.nb -= 5 + } else { + if dist, err = f.huffSym(f.hd); err != nil { + if debugDecode { + fmt.Println("huffsym:", err) + } + f.err = err + return + } + } + + switch { + case dist < 4: + dist++ + case dist < maxNumDist: + nb := 
uint(dist-2) >> 1 + // have 1 bit in bottom of dist, need nb more. + extra := (dist & 1) << nb + for f.nb < nb { + if err = moreBits(); err != nil { + if debugDecode { + fmt.Println("morebits f.nb<nb:", err) + } + f.err = err + return + } + } + extra |= int(f.b & uint32(1<<nb-1)) + f.b >>= nb + f.nb -= nb + dist = 1<<(nb+1) + 1 + extra + default: + if debugDecode { + fmt.Println("dist too big:", dist, maxNumDist) + } + f.err = CorruptInputError(f.roffset) + return + } + + // No check on length; encoding can be prescient. + if dist > f.dict.histSize() { + if debugDecode { + fmt.Println("dist > f.dict.histSize():", dist, f.dict.histSize()) + } + f.err = CorruptInputError(f.roffset) + return + } + + f.copyLen, f.copyDist = length, dist + goto copyHistory + } + +copyHistory: + // Perform a backwards copy according to RFC section 3.2.3. + { + cnt := f.dict.tryWriteCopy(f.copyDist, f.copyLen) + if cnt == 0 { + cnt = f.dict.writeCopy(f.copyDist, f.copyLen) + } + f.copyLen -= cnt + + if f.dict.availWrite() == 0 || f.copyLen > 0 { + f.toRead = f.dict.readFlush() + f.step = (*decompressor).huffmanBytesReader // We need to continue this work + f.stepState = stateDict + return + } + goto readLiteral + } +} + +// Decode a single Huffman block from f. +// hl and hd are the Huffman states for the lit/length values +// and the distance values, respectively. If hd == nil, using the +// fixed distance encoding associated with fixed Huffman blocks. +func (f *decompressor) huffmanBufioReader() { + const ( + stateInit = iota // Zero value must be stateInit + stateDict + ) + fr := f.r.(*bufio.Reader) + moreBits := func() error { + c, err := fr.ReadByte() + if err != nil { + return noEOF(err) + } + f.roffset++ + f.b |= uint32(c) << f.nb + f.nb += 8 + return nil + } + + switch f.stepState { + case stateInit: + goto readLiteral + case stateDict: + goto copyHistory + } + +readLiteral: + // Read literal and/or (length, distance) according to RFC section 3.2.3. + { + var v int + { + // Inlined v, err := f.huffSym(f.hl) + // Since a huffmanDecoder can be empty or be composed of a degenerate tree + // with single element, huffSym must error on these two edge cases. In both + // cases, the chunks slice will be 0 for the invalid sequence, leading it + // satisfy the n == 0 check below. + n := uint(f.hl.maxRead) + // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers, + // but is smart enough to keep local variables in registers, so use nb and b, + // inline call to moreBits and reassign b,nb back to f on return. 
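	// The "& 31" masks on the shift counts below bound the shift amount, letting
	// the compiler emit a plain shift instruction instead of the extra check Go
	// would otherwise need for a variable shift count.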
+ nb, b := f.nb, f.b + for { + for nb < n { + c, err := fr.ReadByte() + if err != nil { + f.b = b + f.nb = nb + f.err = noEOF(err) + return + } + f.roffset++ + b |= uint32(c) << (nb & 31) + nb += 8 + } + chunk := f.hl.chunks[b&(huffmanNumChunks-1)] + n = uint(chunk & huffmanCountMask) + if n > huffmanChunkBits { + chunk = f.hl.links[chunk>>huffmanValueShift][(b>>huffmanChunkBits)&f.hl.linkMask] + n = uint(chunk & huffmanCountMask) + } + if n <= nb { + if n == 0 { + f.b = b + f.nb = nb + if debugDecode { + fmt.Println("huffsym: n==0") + } + f.err = CorruptInputError(f.roffset) + return + } + f.b = b >> (n & 31) + f.nb = nb - n + v = int(chunk >> huffmanValueShift) + break + } + } + } + + var n uint // number of bits extra + var length int + var err error + switch { + case v < 256: + f.dict.writeByte(byte(v)) + if f.dict.availWrite() == 0 { + f.toRead = f.dict.readFlush() + f.step = (*decompressor).huffmanBufioReader + f.stepState = stateInit + return + } + goto readLiteral + case v == 256: + f.finishBlock() + return + // otherwise, reference to older data + case v < 265: + length = v - (257 - 3) + n = 0 + case v < 269: + length = v*2 - (265*2 - 11) + n = 1 + case v < 273: + length = v*4 - (269*4 - 19) + n = 2 + case v < 277: + length = v*8 - (273*8 - 35) + n = 3 + case v < 281: + length = v*16 - (277*16 - 67) + n = 4 + case v < 285: + length = v*32 - (281*32 - 131) + n = 5 + case v < maxNumLit: + length = 258 + n = 0 + default: + if debugDecode { + fmt.Println(v, ">= maxNumLit") + } + f.err = CorruptInputError(f.roffset) + return + } + if n > 0 { + for f.nb < n { + if err = moreBits(); err != nil { + if debugDecode { + fmt.Println("morebits n>0:", err) + } + f.err = err + return + } + } + length += int(f.b & uint32(1<<n-1)) + f.b >>= n + f.nb -= n + } + + var dist int + if f.hd == nil { + for f.nb < 5 { + if err = moreBits(); err != nil { + if debugDecode { + fmt.Println("morebits f.nb<5:", err) + } + f.err = err + return + } + } + dist = int(bits.Reverse8(uint8(f.b & 0x1F << 3))) + f.b >>= 5 + f.nb -= 5 + } else { + if dist, err = f.huffSym(f.hd); err != nil { + if debugDecode { + fmt.Println("huffsym:", err) + } + f.err = err + return + } + } + + switch { + case dist < 4: + dist++ + case dist < maxNumDist: + nb := uint(dist-2) >> 1 + // have 1 bit in bottom of dist, need nb more. + extra := (dist & 1) << nb + for f.nb < nb { + if err = moreBits(); err != nil { + if debugDecode { + fmt.Println("morebits f.nb<nb:", err) + } + f.err = err + return + } + } + extra |= int(f.b & uint32(1<<nb-1)) + f.b >>= nb + f.nb -= nb + dist = 1<<(nb+1) + 1 + extra + default: + if debugDecode { + fmt.Println("dist too big:", dist, maxNumDist) + } + f.err = CorruptInputError(f.roffset) + return + } + + // No check on length; encoding can be prescient. + if dist > f.dict.histSize() { + if debugDecode { + fmt.Println("dist > f.dict.histSize():", dist, f.dict.histSize()) + } + f.err = CorruptInputError(f.roffset) + return + } + + f.copyLen, f.copyDist = length, dist + goto copyHistory + } + +copyHistory: + // Perform a backwards copy according to RFC section 3.2.3. + { + cnt := f.dict.tryWriteCopy(f.copyDist, f.copyLen) + if cnt == 0 { + cnt = f.dict.writeCopy(f.copyDist, f.copyLen) + } + f.copyLen -= cnt + + if f.dict.availWrite() == 0 || f.copyLen > 0 { + f.toRead = f.dict.readFlush() + f.step = (*decompressor).huffmanBufioReader // We need to continue this work + f.stepState = stateDict + return + } + goto readLiteral + } +} + +// Decode a single Huffman block from f. 
+// hl and hd are the Huffman states for the lit/length values +// and the distance values, respectively. If hd == nil, using the +// fixed distance encoding associated with fixed Huffman blocks. +func (f *decompressor) huffmanStringsReader() { + const ( + stateInit = iota // Zero value must be stateInit + stateDict + ) + fr := f.r.(*strings.Reader) + moreBits := func() error { + c, err := fr.ReadByte() + if err != nil { + return noEOF(err) + } + f.roffset++ + f.b |= uint32(c) << f.nb + f.nb += 8 + return nil + } + + switch f.stepState { + case stateInit: + goto readLiteral + case stateDict: + goto copyHistory + } + +readLiteral: + // Read literal and/or (length, distance) according to RFC section 3.2.3. + { + var v int + { + // Inlined v, err := f.huffSym(f.hl) + // Since a huffmanDecoder can be empty or be composed of a degenerate tree + // with single element, huffSym must error on these two edge cases. In both + // cases, the chunks slice will be 0 for the invalid sequence, leading it + // satisfy the n == 0 check below. + n := uint(f.hl.maxRead) + // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers, + // but is smart enough to keep local variables in registers, so use nb and b, + // inline call to moreBits and reassign b,nb back to f on return. + nb, b := f.nb, f.b + for { + for nb < n { + c, err := fr.ReadByte() + if err != nil { + f.b = b + f.nb = nb + f.err = noEOF(err) + return + } + f.roffset++ + b |= uint32(c) << (nb & 31) + nb += 8 + } + chunk := f.hl.chunks[b&(huffmanNumChunks-1)] + n = uint(chunk & huffmanCountMask) + if n > huffmanChunkBits { + chunk = f.hl.links[chunk>>huffmanValueShift][(b>>huffmanChunkBits)&f.hl.linkMask] + n = uint(chunk & huffmanCountMask) + } + if n <= nb { + if n == 0 { + f.b = b + f.nb = nb + if debugDecode { + fmt.Println("huffsym: n==0") + } + f.err = CorruptInputError(f.roffset) + return + } + f.b = b >> (n & 31) + f.nb = nb - n + v = int(chunk >> huffmanValueShift) + break + } + } + } + + var n uint // number of bits extra + var length int + var err error + switch { + case v < 256: + f.dict.writeByte(byte(v)) + if f.dict.availWrite() == 0 { + f.toRead = f.dict.readFlush() + f.step = (*decompressor).huffmanStringsReader + f.stepState = stateInit + return + } + goto readLiteral + case v == 256: + f.finishBlock() + return + // otherwise, reference to older data + case v < 265: + length = v - (257 - 3) + n = 0 + case v < 269: + length = v*2 - (265*2 - 11) + n = 1 + case v < 273: + length = v*4 - (269*4 - 19) + n = 2 + case v < 277: + length = v*8 - (273*8 - 35) + n = 3 + case v < 281: + length = v*16 - (277*16 - 67) + n = 4 + case v < 285: + length = v*32 - (281*32 - 131) + n = 5 + case v < maxNumLit: + length = 258 + n = 0 + default: + if debugDecode { + fmt.Println(v, ">= maxNumLit") + } + f.err = CorruptInputError(f.roffset) + return + } + if n > 0 { + for f.nb < n { + if err = moreBits(); err != nil { + if debugDecode { + fmt.Println("morebits n>0:", err) + } + f.err = err + return + } + } + length += int(f.b & uint32(1<<n-1)) + f.b >>= n + f.nb -= n + } + + var dist int + if f.hd == nil { + for f.nb < 5 { + if err = moreBits(); err != nil { + if debugDecode { + fmt.Println("morebits f.nb<5:", err) + } + f.err = err + return + } + } + dist = int(bits.Reverse8(uint8(f.b & 0x1F << 3))) + f.b >>= 5 + f.nb -= 5 + } else { + if dist, err = f.huffSym(f.hd); err != nil { + if debugDecode { + fmt.Println("huffsym:", err) + } + f.err = err + return + } + } + + switch { + case dist < 4: + dist++ + case dist < maxNumDist: + nb := 
uint(dist-2) >> 1 + // have 1 bit in bottom of dist, need nb more. + extra := (dist & 1) << nb + for f.nb < nb { + if err = moreBits(); err != nil { + if debugDecode { + fmt.Println("morebits f.nb<nb:", err) + } + f.err = err + return + } + } + extra |= int(f.b & uint32(1<<nb-1)) + f.b >>= nb + f.nb -= nb + dist = 1<<(nb+1) + 1 + extra + default: + if debugDecode { + fmt.Println("dist too big:", dist, maxNumDist) + } + f.err = CorruptInputError(f.roffset) + return + } + + // No check on length; encoding can be prescient. + if dist > f.dict.histSize() { + if debugDecode { + fmt.Println("dist > f.dict.histSize():", dist, f.dict.histSize()) + } + f.err = CorruptInputError(f.roffset) + return + } + + f.copyLen, f.copyDist = length, dist + goto copyHistory + } + +copyHistory: + // Perform a backwards copy according to RFC section 3.2.3. + { + cnt := f.dict.tryWriteCopy(f.copyDist, f.copyLen) + if cnt == 0 { + cnt = f.dict.writeCopy(f.copyDist, f.copyLen) + } + f.copyLen -= cnt + + if f.dict.availWrite() == 0 || f.copyLen > 0 { + f.toRead = f.dict.readFlush() + f.step = (*decompressor).huffmanStringsReader // We need to continue this work + f.stepState = stateDict + return + } + goto readLiteral + } +} + +func (f *decompressor) huffmanBlockDecoder() func() { + switch f.r.(type) { + case *bytes.Buffer: + return f.huffmanBytesBuffer + case *bytes.Reader: + return f.huffmanBytesReader + case *bufio.Reader: + return f.huffmanBufioReader + case *strings.Reader: + return f.huffmanStringsReader + default: + return f.huffmanBlockGeneric + } +} diff --git a/vendor/github.com/klauspost/compress/flate/level1.go b/vendor/github.com/klauspost/compress/flate/level1.go index 102fc74c7..1e5eea396 100644 --- a/vendor/github.com/klauspost/compress/flate/level1.go +++ b/vendor/github.com/klauspost/compress/flate/level1.go @@ -16,7 +16,7 @@ func (e *fastEncL1) Encode(dst *tokens, src []byte) { inputMargin = 12 - 1 minNonLiteralBlockSize = 1 + 1 + inputMargin ) - if debugDecode && e.cur < 0 { + if debugDeflate && e.cur < 0 { panic(fmt.Sprint("e.cur < 0: ", e.cur)) } @@ -81,12 +81,12 @@ func (e *fastEncL1) Encode(dst *tokens, src []byte) { } now := load6432(src, nextS) - e.table[nextHash] = tableEntry{offset: s + e.cur, val: cv} + e.table[nextHash] = tableEntry{offset: s + e.cur} nextHash = hash(uint32(now)) offset := s - (candidate.offset - e.cur) - if offset < maxMatchOffset && cv == candidate.val { - e.table[nextHash] = tableEntry{offset: nextS + e.cur, val: uint32(now)} + if offset < maxMatchOffset && cv == load3232(src, candidate.offset-e.cur) { + e.table[nextHash] = tableEntry{offset: nextS + e.cur} break } @@ -96,11 +96,11 @@ func (e *fastEncL1) Encode(dst *tokens, src []byte) { nextS++ candidate = e.table[nextHash] now >>= 8 - e.table[nextHash] = tableEntry{offset: s + e.cur, val: cv} + e.table[nextHash] = tableEntry{offset: s + e.cur} offset = s - (candidate.offset - e.cur) - if offset < maxMatchOffset && cv == candidate.val { - e.table[nextHash] = tableEntry{offset: nextS + e.cur, val: uint32(now)} + if offset < maxMatchOffset && cv == load3232(src, candidate.offset-e.cur) { + e.table[nextHash] = tableEntry{offset: nextS + e.cur} break } cv = uint32(now) @@ -139,7 +139,7 @@ func (e *fastEncL1) Encode(dst *tokens, src []byte) { // Index first pair after match end. 
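	// tableEntry no longer stores the 4-byte value; a candidate is re-validated
	// by loading the bytes at candidate.offset-e.cur with load3232, which halves
	// the size of each table entry at the cost of one extra load per probe.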
if int(s+l+4) < len(src) { cv := load3232(src, s) - e.table[hash(cv)] = tableEntry{offset: s + e.cur, val: cv} + e.table[hash(cv)] = tableEntry{offset: s + e.cur} } goto emitRemainder } @@ -153,14 +153,14 @@ func (e *fastEncL1) Encode(dst *tokens, src []byte) { x := load6432(src, s-2) o := e.cur + s - 2 prevHash := hash(uint32(x)) - e.table[prevHash] = tableEntry{offset: o, val: uint32(x)} + e.table[prevHash] = tableEntry{offset: o} x >>= 16 currHash := hash(uint32(x)) candidate = e.table[currHash] - e.table[currHash] = tableEntry{offset: o + 2, val: uint32(x)} + e.table[currHash] = tableEntry{offset: o + 2} offset := s - (candidate.offset - e.cur) - if offset > maxMatchOffset || uint32(x) != candidate.val { + if offset > maxMatchOffset || uint32(x) != load3232(src, candidate.offset-e.cur) { cv = uint32(x >> 8) s++ break diff --git a/vendor/github.com/klauspost/compress/flate/level2.go b/vendor/github.com/klauspost/compress/flate/level2.go index dc6b1d314..5b986a194 100644 --- a/vendor/github.com/klauspost/compress/flate/level2.go +++ b/vendor/github.com/klauspost/compress/flate/level2.go @@ -18,7 +18,7 @@ func (e *fastEncL2) Encode(dst *tokens, src []byte) { minNonLiteralBlockSize = 1 + 1 + inputMargin ) - if debugDecode && e.cur < 0 { + if debugDeflate && e.cur < 0 { panic(fmt.Sprint("e.cur < 0: ", e.cur)) } @@ -83,12 +83,12 @@ func (e *fastEncL2) Encode(dst *tokens, src []byte) { } candidate = e.table[nextHash] now := load6432(src, nextS) - e.table[nextHash] = tableEntry{offset: s + e.cur, val: cv} + e.table[nextHash] = tableEntry{offset: s + e.cur} nextHash = hash4u(uint32(now), bTableBits) offset := s - (candidate.offset - e.cur) - if offset < maxMatchOffset && cv == candidate.val { - e.table[nextHash] = tableEntry{offset: nextS + e.cur, val: uint32(now)} + if offset < maxMatchOffset && cv == load3232(src, candidate.offset-e.cur) { + e.table[nextHash] = tableEntry{offset: nextS + e.cur} break } @@ -98,10 +98,10 @@ func (e *fastEncL2) Encode(dst *tokens, src []byte) { nextS++ candidate = e.table[nextHash] now >>= 8 - e.table[nextHash] = tableEntry{offset: s + e.cur, val: cv} + e.table[nextHash] = tableEntry{offset: s + e.cur} offset = s - (candidate.offset - e.cur) - if offset < maxMatchOffset && cv == candidate.val { + if offset < maxMatchOffset && cv == load3232(src, candidate.offset-e.cur) { break } cv = uint32(now) @@ -148,7 +148,7 @@ func (e *fastEncL2) Encode(dst *tokens, src []byte) { // Index first pair after match end. 
if int(s+l+4) < len(src) { cv := load3232(src, s) - e.table[hash4u(cv, bTableBits)] = tableEntry{offset: s + e.cur, val: cv} + e.table[hash4u(cv, bTableBits)] = tableEntry{offset: s + e.cur} } goto emitRemainder } @@ -157,15 +157,15 @@ func (e *fastEncL2) Encode(dst *tokens, src []byte) { for i := s - l + 2; i < s-5; i += 7 { x := load6432(src, int32(i)) nextHash := hash4u(uint32(x), bTableBits) - e.table[nextHash] = tableEntry{offset: e.cur + i, val: uint32(x)} + e.table[nextHash] = tableEntry{offset: e.cur + i} // Skip one x >>= 16 nextHash = hash4u(uint32(x), bTableBits) - e.table[nextHash] = tableEntry{offset: e.cur + i + 2, val: uint32(x)} + e.table[nextHash] = tableEntry{offset: e.cur + i + 2} // Skip one x >>= 16 nextHash = hash4u(uint32(x), bTableBits) - e.table[nextHash] = tableEntry{offset: e.cur + i + 4, val: uint32(x)} + e.table[nextHash] = tableEntry{offset: e.cur + i + 4} } // We could immediately start working at s now, but to improve @@ -178,14 +178,14 @@ func (e *fastEncL2) Encode(dst *tokens, src []byte) { o := e.cur + s - 2 prevHash := hash4u(uint32(x), bTableBits) prevHash2 := hash4u(uint32(x>>8), bTableBits) - e.table[prevHash] = tableEntry{offset: o, val: uint32(x)} - e.table[prevHash2] = tableEntry{offset: o + 1, val: uint32(x >> 8)} + e.table[prevHash] = tableEntry{offset: o} + e.table[prevHash2] = tableEntry{offset: o + 1} currHash := hash4u(uint32(x>>16), bTableBits) candidate = e.table[currHash] - e.table[currHash] = tableEntry{offset: o + 2, val: uint32(x >> 16)} + e.table[currHash] = tableEntry{offset: o + 2} offset := s - (candidate.offset - e.cur) - if offset > maxMatchOffset || uint32(x>>16) != candidate.val { + if offset > maxMatchOffset || uint32(x>>16) != load3232(src, candidate.offset-e.cur) { cv = uint32(x >> 24) s++ break diff --git a/vendor/github.com/klauspost/compress/flate/level3.go b/vendor/github.com/klauspost/compress/flate/level3.go index 1a3ff9b6b..c22b4244a 100644 --- a/vendor/github.com/klauspost/compress/flate/level3.go +++ b/vendor/github.com/klauspost/compress/flate/level3.go @@ -15,7 +15,7 @@ func (e *fastEncL3) Encode(dst *tokens, src []byte) { minNonLiteralBlockSize = 1 + 1 + inputMargin ) - if debugDecode && e.cur < 0 { + if debugDeflate && e.cur < 0 { panic(fmt.Sprint("e.cur < 0: ", e.cur)) } @@ -81,22 +81,26 @@ func (e *fastEncL3) Encode(dst *tokens, src []byte) { } candidates := e.table[nextHash] now := load3232(src, nextS) - e.table[nextHash] = tableEntryPrev{Prev: candidates.Cur, Cur: tableEntry{offset: s + e.cur, val: cv}} + + // Safe offset distance until s + 4... + minOffset := e.cur + s - (maxMatchOffset - 4) + e.table[nextHash] = tableEntryPrev{Prev: candidates.Cur, Cur: tableEntry{offset: s + e.cur}} // Check both candidates candidate = candidates.Cur - offset := s - (candidate.offset - e.cur) - if cv == candidate.val { - if offset > maxMatchOffset { - cv = now - // Previous will also be invalid, we have nothing. - continue - } - o2 := s - (candidates.Prev.offset - e.cur) - if cv != candidates.Prev.val || o2 > maxMatchOffset { + if candidate.offset < minOffset { + cv = now + // Previous will also be invalid, we have nothing. + continue + } + + if cv == load3232(src, candidate.offset-e.cur) { + if candidates.Prev.offset < minOffset || cv != load3232(src, candidates.Prev.offset-e.cur) { break } // Both match and are valid, pick longest. 
+ offset := s - (candidate.offset - e.cur) + o2 := s - (candidates.Prev.offset - e.cur) l1, l2 := matchLen(src[s+4:], src[s-offset+4:]), matchLen(src[s+4:], src[s-o2+4:]) if l2 > l1 { candidate = candidates.Prev @@ -106,11 +110,8 @@ func (e *fastEncL3) Encode(dst *tokens, src []byte) { // We only check if value mismatches. // Offset will always be invalid in other cases. candidate = candidates.Prev - if cv == candidate.val { - offset := s - (candidate.offset - e.cur) - if offset <= maxMatchOffset { - break - } + if candidate.offset > minOffset && cv == load3232(src, candidate.offset-e.cur) { + break } } cv = now @@ -158,7 +159,7 @@ func (e *fastEncL3) Encode(dst *tokens, src []byte) { nextHash := hash(cv) e.table[nextHash] = tableEntryPrev{ Prev: e.table[nextHash].Cur, - Cur: tableEntry{offset: e.cur + t, val: cv}, + Cur: tableEntry{offset: e.cur + t}, } } goto emitRemainder @@ -170,21 +171,21 @@ func (e *fastEncL3) Encode(dst *tokens, src []byte) { prevHash := hash(uint32(x)) e.table[prevHash] = tableEntryPrev{ Prev: e.table[prevHash].Cur, - Cur: tableEntry{offset: e.cur + s - 3, val: uint32(x)}, + Cur: tableEntry{offset: e.cur + s - 3}, } x >>= 8 prevHash = hash(uint32(x)) e.table[prevHash] = tableEntryPrev{ Prev: e.table[prevHash].Cur, - Cur: tableEntry{offset: e.cur + s - 2, val: uint32(x)}, + Cur: tableEntry{offset: e.cur + s - 2}, } x >>= 8 prevHash = hash(uint32(x)) e.table[prevHash] = tableEntryPrev{ Prev: e.table[prevHash].Cur, - Cur: tableEntry{offset: e.cur + s - 1, val: uint32(x)}, + Cur: tableEntry{offset: e.cur + s - 1}, } x >>= 8 currHash := hash(uint32(x)) @@ -192,21 +193,18 @@ func (e *fastEncL3) Encode(dst *tokens, src []byte) { cv = uint32(x) e.table[currHash] = tableEntryPrev{ Prev: candidates.Cur, - Cur: tableEntry{offset: s + e.cur, val: cv}, + Cur: tableEntry{offset: s + e.cur}, } // Check both candidates candidate = candidates.Cur - if cv == candidate.val { - offset := s - (candidate.offset - e.cur) - if offset <= maxMatchOffset { - continue - } - } else { + minOffset := e.cur + s - (maxMatchOffset - 4) + + if candidate.offset > minOffset && cv != load3232(src, candidate.offset-e.cur) { // We only check if value mismatches. // Offset will always be invalid in other cases. candidate = candidates.Prev - if cv == candidate.val { + if candidate.offset > minOffset && cv == load3232(src, candidate.offset-e.cur) { offset := s - (candidate.offset - e.cur) if offset <= maxMatchOffset { continue diff --git a/vendor/github.com/klauspost/compress/flate/level4.go b/vendor/github.com/klauspost/compress/flate/level4.go index f3ecc9c4d..e62f0c02b 100644 --- a/vendor/github.com/klauspost/compress/flate/level4.go +++ b/vendor/github.com/klauspost/compress/flate/level4.go @@ -13,7 +13,7 @@ func (e *fastEncL4) Encode(dst *tokens, src []byte) { inputMargin = 12 - 1 minNonLiteralBlockSize = 1 + 1 + inputMargin ) - if debugDecode && e.cur < 0 { + if debugDeflate && e.cur < 0 { panic(fmt.Sprint("e.cur < 0: ", e.cur)) } // Protect against e.cur wraparound. @@ -92,24 +92,24 @@ func (e *fastEncL4) Encode(dst *tokens, src []byte) { sCandidate := e.table[nextHashS] lCandidate := e.bTable[nextHashL] next := load6432(src, nextS) - entry := tableEntry{offset: s + e.cur, val: uint32(cv)} + entry := tableEntry{offset: s + e.cur} e.table[nextHashS] = entry e.bTable[nextHashL] = entry t = lCandidate.offset - e.cur - if s-t < maxMatchOffset && uint32(cv) == lCandidate.val { + if s-t < maxMatchOffset && uint32(cv) == load3232(src, lCandidate.offset-e.cur) { // We got a long match. Use that. 
break } t = sCandidate.offset - e.cur - if s-t < maxMatchOffset && uint32(cv) == sCandidate.val { + if s-t < maxMatchOffset && uint32(cv) == load3232(src, sCandidate.offset-e.cur) { // Found a 4 match... lCandidate = e.bTable[hash7(next, tableBits)] // If the next long is a candidate, check if we should use that instead... lOff := nextS - (lCandidate.offset - e.cur) - if lOff < maxMatchOffset && lCandidate.val == uint32(next) { + if lOff < maxMatchOffset && load3232(src, lCandidate.offset-e.cur) == uint32(next) { l1, l2 := matchLen(src[s+4:], src[t+4:]), matchLen(src[nextS+4:], src[nextS-lOff+4:]) if l2 > l1 { s = nextS @@ -137,7 +137,7 @@ func (e *fastEncL4) Encode(dst *tokens, src []byte) { if nextEmit < s { emitLiteral(dst, src[nextEmit:s]) } - if false { + if debugDeflate { if t >= s { panic("s-t") } @@ -160,8 +160,8 @@ func (e *fastEncL4) Encode(dst *tokens, src []byte) { // Index first pair after match end. if int(s+8) < len(src) { cv := load6432(src, s) - e.table[hash4x64(cv, tableBits)] = tableEntry{offset: s + e.cur, val: uint32(cv)} - e.bTable[hash7(cv, tableBits)] = tableEntry{offset: s + e.cur, val: uint32(cv)} + e.table[hash4x64(cv, tableBits)] = tableEntry{offset: s + e.cur} + e.bTable[hash7(cv, tableBits)] = tableEntry{offset: s + e.cur} } goto emitRemainder } @@ -171,20 +171,20 @@ func (e *fastEncL4) Encode(dst *tokens, src []byte) { i := nextS if i < s-1 { cv := load6432(src, i) - t := tableEntry{offset: i + e.cur, val: uint32(cv)} - t2 := tableEntry{val: uint32(cv >> 8), offset: t.offset + 1} + t := tableEntry{offset: i + e.cur} + t2 := tableEntry{offset: t.offset + 1} e.bTable[hash7(cv, tableBits)] = t e.bTable[hash7(cv>>8, tableBits)] = t2 - e.table[hash4u(t2.val, tableBits)] = t2 + e.table[hash4u(uint32(cv>>8), tableBits)] = t2 i += 3 for ; i < s-1; i += 3 { cv := load6432(src, i) - t := tableEntry{offset: i + e.cur, val: uint32(cv)} - t2 := tableEntry{val: uint32(cv >> 8), offset: t.offset + 1} + t := tableEntry{offset: i + e.cur} + t2 := tableEntry{offset: t.offset + 1} e.bTable[hash7(cv, tableBits)] = t e.bTable[hash7(cv>>8, tableBits)] = t2 - e.table[hash4u(t2.val, tableBits)] = t2 + e.table[hash4u(uint32(cv>>8), tableBits)] = t2 } } } @@ -195,8 +195,8 @@ func (e *fastEncL4) Encode(dst *tokens, src []byte) { o := e.cur + s - 1 prevHashS := hash4x64(x, tableBits) prevHashL := hash7(x, tableBits) - e.table[prevHashS] = tableEntry{offset: o, val: uint32(x)} - e.bTable[prevHashL] = tableEntry{offset: o, val: uint32(x)} + e.table[prevHashS] = tableEntry{offset: o} + e.bTable[prevHashL] = tableEntry{offset: o} cv = x >> 8 } diff --git a/vendor/github.com/klauspost/compress/flate/level5.go b/vendor/github.com/klauspost/compress/flate/level5.go index 4e3916825..d513f1ffd 100644 --- a/vendor/github.com/klauspost/compress/flate/level5.go +++ b/vendor/github.com/klauspost/compress/flate/level5.go @@ -13,7 +13,7 @@ func (e *fastEncL5) Encode(dst *tokens, src []byte) { inputMargin = 12 - 1 minNonLiteralBlockSize = 1 + 1 + inputMargin ) - if debugDecode && e.cur < 0 { + if debugDeflate && e.cur < 0 { panic(fmt.Sprint("e.cur < 0: ", e.cur)) } @@ -100,7 +100,7 @@ func (e *fastEncL5) Encode(dst *tokens, src []byte) { sCandidate := e.table[nextHashS] lCandidate := e.bTable[nextHashL] next := load6432(src, nextS) - entry := tableEntry{offset: s + e.cur, val: uint32(cv)} + entry := tableEntry{offset: s + e.cur} e.table[nextHashS] = entry eLong := &e.bTable[nextHashL] eLong.Cur, eLong.Prev = entry, eLong.Cur @@ -110,14 +110,14 @@ func (e *fastEncL5) Encode(dst *tokens, src []byte) { t 
= lCandidate.Cur.offset - e.cur if s-t < maxMatchOffset { - if uint32(cv) == lCandidate.Cur.val { + if uint32(cv) == load3232(src, lCandidate.Cur.offset-e.cur) { // Store the next match - e.table[nextHashS] = tableEntry{offset: nextS + e.cur, val: uint32(next)} + e.table[nextHashS] = tableEntry{offset: nextS + e.cur} eLong := &e.bTable[nextHashL] - eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur, val: uint32(next)}, eLong.Cur + eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur t2 := lCandidate.Prev.offset - e.cur - if s-t2 < maxMatchOffset && uint32(cv) == lCandidate.Prev.val { + if s-t2 < maxMatchOffset && uint32(cv) == load3232(src, lCandidate.Prev.offset-e.cur) { l = e.matchlen(s+4, t+4, src) + 4 ml1 := e.matchlen(s+4, t2+4, src) + 4 if ml1 > l { @@ -129,30 +129,30 @@ func (e *fastEncL5) Encode(dst *tokens, src []byte) { break } t = lCandidate.Prev.offset - e.cur - if s-t < maxMatchOffset && uint32(cv) == lCandidate.Prev.val { + if s-t < maxMatchOffset && uint32(cv) == load3232(src, lCandidate.Prev.offset-e.cur) { // Store the next match - e.table[nextHashS] = tableEntry{offset: nextS + e.cur, val: uint32(next)} + e.table[nextHashS] = tableEntry{offset: nextS + e.cur} eLong := &e.bTable[nextHashL] - eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur, val: uint32(next)}, eLong.Cur + eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur break } } t = sCandidate.offset - e.cur - if s-t < maxMatchOffset && uint32(cv) == sCandidate.val { + if s-t < maxMatchOffset && uint32(cv) == load3232(src, sCandidate.offset-e.cur) { // Found a 4 match... l = e.matchlen(s+4, t+4, src) + 4 lCandidate = e.bTable[nextHashL] // Store the next match - e.table[nextHashS] = tableEntry{offset: nextS + e.cur, val: uint32(next)} + e.table[nextHashS] = tableEntry{offset: nextS + e.cur} eLong := &e.bTable[nextHashL] - eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur, val: uint32(next)}, eLong.Cur + eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur // If the next long is a candidate, use that... t2 := lCandidate.Cur.offset - e.cur if nextS-t2 < maxMatchOffset { - if lCandidate.Cur.val == uint32(next) { + if load3232(src, lCandidate.Cur.offset-e.cur) == uint32(next) { ml := e.matchlen(nextS+4, t2+4, src) + 4 if ml > l { t = t2 @@ -163,7 +163,7 @@ func (e *fastEncL5) Encode(dst *tokens, src []byte) { } // If the previous long is a candidate, use that... 
t2 = lCandidate.Prev.offset - e.cur - if nextS-t2 < maxMatchOffset && lCandidate.Prev.val == uint32(next) { + if nextS-t2 < maxMatchOffset && load3232(src, lCandidate.Prev.offset-e.cur) == uint32(next) { ml := e.matchlen(nextS+4, t2+4, src) + 4 if ml > l { t = t2 @@ -197,7 +197,7 @@ func (e *fastEncL5) Encode(dst *tokens, src []byte) { if nextEmit < s { emitLiteral(dst, src[nextEmit:s]) } - if false { + if debugDeflate { if t >= s { panic(fmt.Sprintln("s-t", s, t)) } @@ -226,31 +226,31 @@ func (e *fastEncL5) Encode(dst *tokens, src []byte) { i := s - l + 1 if i < s-1 { cv := load6432(src, i) - t := tableEntry{offset: i + e.cur, val: uint32(cv)} + t := tableEntry{offset: i + e.cur} e.table[hash4x64(cv, tableBits)] = t eLong := &e.bTable[hash7(cv, tableBits)] eLong.Cur, eLong.Prev = t, eLong.Cur // Do an long at i+1 cv >>= 8 - t = tableEntry{offset: t.offset + 1, val: uint32(cv)} + t = tableEntry{offset: t.offset + 1} eLong = &e.bTable[hash7(cv, tableBits)] eLong.Cur, eLong.Prev = t, eLong.Cur // We only have enough bits for a short entry at i+2 cv >>= 8 - t = tableEntry{offset: t.offset + 1, val: uint32(cv)} + t = tableEntry{offset: t.offset + 1} e.table[hash4x64(cv, tableBits)] = t // Skip one - otherwise we risk hitting 's' i += 4 for ; i < s-1; i += hashEvery { cv := load6432(src, i) - t := tableEntry{offset: i + e.cur, val: uint32(cv)} - t2 := tableEntry{offset: t.offset + 1, val: uint32(cv >> 8)} + t := tableEntry{offset: i + e.cur} + t2 := tableEntry{offset: t.offset + 1} eLong := &e.bTable[hash7(cv, tableBits)] eLong.Cur, eLong.Prev = t, eLong.Cur - e.table[hash4u(t2.val, tableBits)] = t2 + e.table[hash4u(uint32(cv>>8), tableBits)] = t2 } } } @@ -261,9 +261,9 @@ func (e *fastEncL5) Encode(dst *tokens, src []byte) { o := e.cur + s - 1 prevHashS := hash4x64(x, tableBits) prevHashL := hash7(x, tableBits) - e.table[prevHashS] = tableEntry{offset: o, val: uint32(x)} + e.table[prevHashS] = tableEntry{offset: o} eLong := &e.bTable[prevHashL] - eLong.Cur, eLong.Prev = tableEntry{offset: o, val: uint32(x)}, eLong.Cur + eLong.Cur, eLong.Prev = tableEntry{offset: o}, eLong.Cur cv = x >> 8 } diff --git a/vendor/github.com/klauspost/compress/flate/level6.go b/vendor/github.com/klauspost/compress/flate/level6.go index 00a311977..a52c80ea4 100644 --- a/vendor/github.com/klauspost/compress/flate/level6.go +++ b/vendor/github.com/klauspost/compress/flate/level6.go @@ -13,7 +13,7 @@ func (e *fastEncL6) Encode(dst *tokens, src []byte) { inputMargin = 12 - 1 minNonLiteralBlockSize = 1 + 1 + inputMargin ) - if debugDecode && e.cur < 0 { + if debugDeflate && e.cur < 0 { panic(fmt.Sprint("e.cur < 0: ", e.cur)) } @@ -101,7 +101,7 @@ func (e *fastEncL6) Encode(dst *tokens, src []byte) { sCandidate := e.table[nextHashS] lCandidate := e.bTable[nextHashL] next := load6432(src, nextS) - entry := tableEntry{offset: s + e.cur, val: uint32(cv)} + entry := tableEntry{offset: s + e.cur} e.table[nextHashS] = entry eLong := &e.bTable[nextHashL] eLong.Cur, eLong.Prev = entry, eLong.Cur @@ -112,17 +112,17 @@ func (e *fastEncL6) Encode(dst *tokens, src []byte) { t = lCandidate.Cur.offset - e.cur if s-t < maxMatchOffset { - if uint32(cv) == lCandidate.Cur.val { + if uint32(cv) == load3232(src, lCandidate.Cur.offset-e.cur) { // Long candidate matches at least 4 bytes. 
// Store the next match - e.table[nextHashS] = tableEntry{offset: nextS + e.cur, val: uint32(next)} + e.table[nextHashS] = tableEntry{offset: nextS + e.cur} eLong := &e.bTable[nextHashL] - eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur, val: uint32(next)}, eLong.Cur + eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur // Check the previous long candidate as well. t2 := lCandidate.Prev.offset - e.cur - if s-t2 < maxMatchOffset && uint32(cv) == lCandidate.Prev.val { + if s-t2 < maxMatchOffset && uint32(cv) == load3232(src, lCandidate.Prev.offset-e.cur) { l = e.matchlen(s+4, t+4, src) + 4 ml1 := e.matchlen(s+4, t2+4, src) + 4 if ml1 > l { @@ -135,17 +135,17 @@ func (e *fastEncL6) Encode(dst *tokens, src []byte) { } // Current value did not match, but check if previous long value does. t = lCandidate.Prev.offset - e.cur - if s-t < maxMatchOffset && uint32(cv) == lCandidate.Prev.val { + if s-t < maxMatchOffset && uint32(cv) == load3232(src, lCandidate.Prev.offset-e.cur) { // Store the next match - e.table[nextHashS] = tableEntry{offset: nextS + e.cur, val: uint32(next)} + e.table[nextHashS] = tableEntry{offset: nextS + e.cur} eLong := &e.bTable[nextHashL] - eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur, val: uint32(next)}, eLong.Cur + eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur break } } t = sCandidate.offset - e.cur - if s-t < maxMatchOffset && uint32(cv) == sCandidate.val { + if s-t < maxMatchOffset && uint32(cv) == load3232(src, sCandidate.offset-e.cur) { // Found a 4 match... l = e.matchlen(s+4, t+4, src) + 4 @@ -153,9 +153,9 @@ func (e *fastEncL6) Encode(dst *tokens, src []byte) { lCandidate = e.bTable[nextHashL] // Store the next match - e.table[nextHashS] = tableEntry{offset: nextS + e.cur, val: uint32(next)} + e.table[nextHashS] = tableEntry{offset: nextS + e.cur} eLong := &e.bTable[nextHashL] - eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur, val: uint32(next)}, eLong.Cur + eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur // Check repeat at s + repOff const repOff = 1 @@ -174,7 +174,7 @@ func (e *fastEncL6) Encode(dst *tokens, src []byte) { // If the next long is a candidate, use that... t2 = lCandidate.Cur.offset - e.cur if nextS-t2 < maxMatchOffset { - if lCandidate.Cur.val == uint32(next) { + if load3232(src, lCandidate.Cur.offset-e.cur) == uint32(next) { ml := e.matchlen(nextS+4, t2+4, src) + 4 if ml > l { t = t2 @@ -185,7 +185,7 @@ func (e *fastEncL6) Encode(dst *tokens, src []byte) { } // If the previous long is a candidate, use that... t2 = lCandidate.Prev.offset - e.cur - if nextS-t2 < maxMatchOffset && lCandidate.Prev.val == uint32(next) { + if nextS-t2 < maxMatchOffset && load3232(src, lCandidate.Prev.offset-e.cur) == uint32(next) { ml := e.matchlen(nextS+4, t2+4, src) + 4 if ml > l { t = t2 @@ -244,9 +244,9 @@ func (e *fastEncL6) Encode(dst *tokens, src []byte) { // Index after match end. 
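	// Stepping two positions at a time keeps the tables reasonably fresh after a
	// long match without paying the full per-byte hashing cost.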
for i := nextS + 1; i < int32(len(src))-8; i += 2 { cv := load6432(src, i) - e.table[hash4x64(cv, tableBits)] = tableEntry{offset: i + e.cur, val: uint32(cv)} + e.table[hash4x64(cv, tableBits)] = tableEntry{offset: i + e.cur} eLong := &e.bTable[hash7(cv, tableBits)] - eLong.Cur, eLong.Prev = tableEntry{offset: i + e.cur, val: uint32(cv)}, eLong.Cur + eLong.Cur, eLong.Prev = tableEntry{offset: i + e.cur}, eLong.Cur } goto emitRemainder } @@ -255,8 +255,8 @@ func (e *fastEncL6) Encode(dst *tokens, src []byte) { if true { for i := nextS + 1; i < s-1; i += 2 { cv := load6432(src, i) - t := tableEntry{offset: i + e.cur, val: uint32(cv)} - t2 := tableEntry{offset: t.offset + 1, val: uint32(cv >> 8)} + t := tableEntry{offset: i + e.cur} + t2 := tableEntry{offset: t.offset + 1} eLong := &e.bTable[hash7(cv, tableBits)] eLong2 := &e.bTable[hash7(cv>>8, tableBits)] e.table[hash4x64(cv, tableBits)] = t diff --git a/vendor/github.com/klauspost/compress/flate/token.go b/vendor/github.com/klauspost/compress/flate/token.go index 099c0ddbc..f9abf606d 100644 --- a/vendor/github.com/klauspost/compress/flate/token.go +++ b/vendor/github.com/klauspost/compress/flate/token.go @@ -262,7 +262,7 @@ func (t *tokens) EstimatedBits() int { // AddMatch adds a match to the tokens. // This function is very sensitive to inlining and right on the border. func (t *tokens) AddMatch(xlength uint32, xoffset uint32) { - if debugDecode { + if debugDeflate { if xlength >= maxMatchLength+baseMatchLength { panic(fmt.Errorf("invalid length: %v", xlength)) } @@ -281,7 +281,7 @@ func (t *tokens) AddMatch(xlength uint32, xoffset uint32) { // AddMatchLong adds a match to the tokens, potentially longer than max match length. // Length should NOT have the base subtracted, only offset should. func (t *tokens) AddMatchLong(xlength int32, xoffset uint32) { - if debugDecode { + if debugDeflate { if xoffset >= maxMatchOffset+baseMatchOffset { panic(fmt.Errorf("invalid offset: %v", xoffset)) } diff --git a/vendor/github.com/klauspost/compress/zstd/blockenc.go b/vendor/github.com/klauspost/compress/zstd/blockenc.go index 507757d52..4f0eba22f 100644 --- a/vendor/github.com/klauspost/compress/zstd/blockenc.go +++ b/vendor/github.com/klauspost/compress/zstd/blockenc.go @@ -806,7 +806,7 @@ func (b *blockEnc) genCodes() { mlH[v]++ if v > mlMax { mlMax = v - if debug && mlMax > maxMatchLengthSymbol { + if debugAsserts && mlMax > maxMatchLengthSymbol { panic(fmt.Errorf("mlMax > maxMatchLengthSymbol (%d), matchlen: %d", mlMax, seq.matchLen)) } } @@ -821,13 +821,13 @@ func (b *blockEnc) genCodes() { } return int(max) } - if mlMax > maxMatchLengthSymbol { + if debugAsserts && mlMax > maxMatchLengthSymbol { panic(fmt.Errorf("mlMax > maxMatchLengthSymbol (%d)", mlMax)) } - if ofMax > maxOffsetBits { + if debugAsserts && ofMax > maxOffsetBits { panic(fmt.Errorf("ofMax > maxOffsetBits (%d)", ofMax)) } - if llMax > maxLiteralLengthSymbol { + if debugAsserts && llMax > maxLiteralLengthSymbol { panic(fmt.Errorf("llMax > maxLiteralLengthSymbol (%d)", llMax)) } diff --git a/vendor/github.com/klauspost/compress/zstd/bytebuf.go b/vendor/github.com/klauspost/compress/zstd/bytebuf.go index 07321acb1..658ef7838 100644 --- a/vendor/github.com/klauspost/compress/zstd/bytebuf.go +++ b/vendor/github.com/klauspost/compress/zstd/bytebuf.go @@ -30,7 +30,7 @@ type byteBuffer interface { type byteBuf []byte func (b *byteBuf) readSmall(n int) []byte { - if debug && n > 8 { + if debugAsserts && n > 8 { panic(fmt.Errorf("small read > 8 (%d). 
use readBig", n)) } bb := *b @@ -82,7 +82,7 @@ type readerWrapper struct { } func (r *readerWrapper) readSmall(n int) []byte { - if debug && n > 8 { + if debugAsserts && n > 8 { panic(fmt.Errorf("small read > 8 (%d). use readBig", n)) } n2, err := io.ReadFull(r.r, r.tmp[:n]) diff --git a/vendor/github.com/klauspost/compress/zstd/decoder.go b/vendor/github.com/klauspost/compress/zstd/decoder.go index 35a3cda91..73ac3c630 100644 --- a/vendor/github.com/klauspost/compress/zstd/decoder.go +++ b/vendor/github.com/klauspost/compress/zstd/decoder.go @@ -315,7 +315,7 @@ func (d *Decoder) DecodeAll(input, dst []byte) ([]byte, error) { if size > 1<<20 { size = 1 << 20 } - dst = make([]byte, 0, frame.WindowSize) + dst = make([]byte, 0, size) } dst, err = frame.runDecoder(dst, block) diff --git a/vendor/github.com/klauspost/compress/zstd/enc_dfast.go b/vendor/github.com/klauspost/compress/zstd/enc_dfast.go index ee3b09b02..0ffea7655 100644 --- a/vendor/github.com/klauspost/compress/zstd/enc_dfast.go +++ b/vendor/github.com/klauspost/compress/zstd/enc_dfast.go @@ -4,6 +4,8 @@ package zstd +import "fmt" + const ( dFastLongTableBits = 17 // Bits used in the long match table dFastLongTableSize = 1 << dFastLongTableBits // Size of the table @@ -29,7 +31,7 @@ func (e *doubleFastEncoder) Encode(blk *blockEnc, src []byte) { ) // Protect against e.cur wraparound. - for e.cur > (1<<30)+e.maxMatchOff { + for e.cur >= bufferReset { if len(e.hist) == 0 { for i := range e.table[:] { e.table[i] = tableEntry{} @@ -61,6 +63,7 @@ func (e *doubleFastEncoder) Encode(blk *blockEnc, src []byte) { e.longTable[i].offset = v } e.cur = e.maxMatchOff + break } s := e.addBlock(src) @@ -110,7 +113,7 @@ encodeLoop: canRepeat := len(blk.sequences) > 2 for { - if debug && canRepeat && offset1 == 0 { + if debugAsserts && canRepeat && offset1 == 0 { panic("offset0 was 0") } @@ -229,10 +232,10 @@ encodeLoop: // Reference encoder checks all 8 bytes, we only check 4, // but the likelihood of both the first 4 bytes and the hash matching should be enough. t = candidateL.offset - e.cur - if debug && s <= t { - panic("s <= t") + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) } - if debug && s-t > e.maxMatchOff { + if debugAsserts && s-t > e.maxMatchOff { panic("s - t >e.maxMatchOff") } if debugMatches { @@ -266,13 +269,13 @@ encodeLoop: } t = candidateS.offset - e.cur - if debug && s <= t { - panic("s <= t") + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) } - if debug && s-t > e.maxMatchOff { + if debugAsserts && s-t > e.maxMatchOff { panic("s - t >e.maxMatchOff") } - if debug && t < 0 { + if debugAsserts && t < 0 { panic("t<0") } if debugMatches { @@ -294,11 +297,11 @@ encodeLoop: offset2 = offset1 offset1 = s - t - if debug && s <= t { - panic("s <= t") + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) } - if debug && canRepeat && int(offset1) > len(src) { + if debugAsserts && canRepeat && int(offset1) > len(src) { panic("invalid offset") } @@ -424,7 +427,7 @@ func (e *doubleFastEncoder) EncodeNoHist(blk *blockEnc, src []byte) { ) // Protect against e.cur wraparound. - if e.cur > (1<<30)+e.maxMatchOff { + if e.cur >= bufferReset { for i := range e.table[:] { e.table[i] = tableEntry{} } @@ -545,10 +548,10 @@ encodeLoop: // Reference encoder checks all 8 bytes, we only check 4, // but the likelihood of both the first 4 bytes and the hash matching should be enough. 
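	// The assertions below are gated on debugAsserts and report the offending
	// s and t values when they fire.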
t = candidateL.offset - e.cur - if debug && s <= t { - panic("s <= t") + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) } - if debug && s-t > e.maxMatchOff { + if debugAsserts && s-t > e.maxMatchOff { panic("s - t >e.maxMatchOff") } if debugMatches { @@ -582,13 +585,13 @@ encodeLoop: } t = candidateS.offset - e.cur - if debug && s <= t { - panic("s <= t") + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) } - if debug && s-t > e.maxMatchOff { + if debugAsserts && s-t > e.maxMatchOff { panic("s - t >e.maxMatchOff") } - if debug && t < 0 { + if debugAsserts && t < 0 { panic("t<0") } if debugMatches { @@ -610,8 +613,8 @@ encodeLoop: offset2 = offset1 offset1 = s - t - if debug && s <= t { - panic("s <= t") + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) } // Extend the 4-byte match as long as possible. diff --git a/vendor/github.com/klauspost/compress/zstd/enc_fast.go b/vendor/github.com/klauspost/compress/zstd/enc_fast.go index 0bdddac5b..28134b158 100644 --- a/vendor/github.com/klauspost/compress/zstd/enc_fast.go +++ b/vendor/github.com/klauspost/compress/zstd/enc_fast.go @@ -5,6 +5,7 @@ package zstd import ( + "fmt" "math/bits" "github.com/klauspost/compress/zstd/internal/xxhash" @@ -74,7 +75,7 @@ func (e *fastEncoder) Encode(blk *blockEnc, src []byte) { ) // Protect against e.cur wraparound. - for e.cur > (1<<30)+e.maxMatchOff { + for e.cur >= bufferReset { if len(e.hist) == 0 { for i := range e.table[:] { e.table[i] = tableEntry{} @@ -94,6 +95,7 @@ func (e *fastEncoder) Encode(blk *blockEnc, src []byte) { e.table[i].offset = v } e.cur = e.maxMatchOff + break } s := e.addBlock(src) @@ -151,7 +153,7 @@ encodeLoop: canRepeat := len(blk.sequences) > 2 for { - if debug && canRepeat && offset1 == 0 { + if debugAsserts && canRepeat && offset1 == 0 { panic("offset0 was 0") } @@ -212,10 +214,10 @@ encodeLoop: if coffset0 < e.maxMatchOff && uint32(cv) == candidate.val { // found a regular match t = candidate.offset - e.cur - if debug && s <= t { - panic("s <= t") + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) } - if debug && s-t > e.maxMatchOff { + if debugAsserts && s-t > e.maxMatchOff { panic("s - t >e.maxMatchOff") } break @@ -225,13 +227,13 @@ encodeLoop: // found a regular match t = candidate2.offset - e.cur s++ - if debug && s <= t { - panic("s <= t") + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) } - if debug && s-t > e.maxMatchOff { + if debugAsserts && s-t > e.maxMatchOff { panic("s - t >e.maxMatchOff") } - if debug && t < 0 { + if debugAsserts && t < 0 { panic("t<0") } break @@ -246,11 +248,11 @@ encodeLoop: offset2 = offset1 offset1 = s - t - if debug && s <= t { - panic("s <= t") + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) } - if debug && canRepeat && int(offset1) > len(src) { + if debugAsserts && canRepeat && int(offset1) > len(src) { panic("invalid offset") } @@ -343,7 +345,7 @@ func (e *fastEncoder) EncodeNoHist(blk *blockEnc, src []byte) { } } // Protect against e.cur wraparound. 
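	// Once e.cur reaches the shared bufferReset threshold the match tables are
	// cleared and e.cur is rewound, keeping stored offsets well inside the
	// int32 range.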
-	if e.cur > (1<<30)+e.maxMatchOff {
+	if e.cur >= bufferReset {
 		for i := range e.table[:] {
 			e.table[i] = tableEntry{}
 		}
@@ -456,10 +458,10 @@ encodeLoop:
 			if coffset0 < e.maxMatchOff && uint32(cv) == candidate.val {
 				// found a regular match
 				t = candidate.offset - e.cur
-				if debug && s <= t {
-					panic("s <= t")
+				if debugAsserts && s <= t {
+					panic(fmt.Sprintf("s (%d) <= t (%d)", s, t))
 				}
-				if debug && s-t > e.maxMatchOff {
+				if debugAsserts && s-t > e.maxMatchOff {
 					panic("s - t >e.maxMatchOff")
 				}
 				break
@@ -469,13 +471,13 @@ encodeLoop:
 				// found a regular match
 				t = candidate2.offset - e.cur
 				s++
-				if debug && s <= t {
-					panic("s <= t")
+				if debugAsserts && s <= t {
+					panic(fmt.Sprintf("s (%d) <= t (%d)", s, t))
 				}
-				if debug && s-t > e.maxMatchOff {
+				if debugAsserts && s-t > e.maxMatchOff {
 					panic("s - t >e.maxMatchOff")
 				}
-				if debug && t < 0 {
+				if debugAsserts && t < 0 {
 					panic("t<0")
 				}
 				break
@@ -490,8 +492,8 @@ encodeLoop:
 		offset2 = offset1
 		offset1 = s - t
 
-		if debug && s <= t {
-			panic("s <= t")
+		if debugAsserts && s <= t {
+			panic(fmt.Sprintf("s (%d) <= t (%d)", s, t))
 		}
 
 		// Extend the 4-byte match as long as possible.
@@ -570,6 +572,9 @@ encodeLoop:
 }
 
 func (e *fastEncoder) addBlock(src []byte) int32 {
+	if debugAsserts && e.cur > bufferReset {
+		panic(fmt.Sprintf("ecur (%d) > buffer reset (%d)", e.cur, bufferReset))
+	}
 	// check if we have space already
 	if len(e.hist)+len(src) > cap(e.hist) {
 		if cap(e.hist) == 0 {
@@ -608,15 +613,18 @@ func (e *fastEncoder) matchlenNoHist(s, t int32, src []byte) int32 {
 }
 
 func (e *fastEncoder) matchlen(s, t int32, src []byte) int32 {
-	if debug {
+	if debugAsserts {
 		if s < 0 {
-			panic("s<0")
+			err := fmt.Sprintf("s (%d) < 0", s)
+			panic(err)
 		}
 		if t < 0 {
-			panic("t<0")
+			err := fmt.Sprintf("s (%d) < 0", s)
+			panic(err)
 		}
 		if s-t > e.maxMatchOff {
-			panic(s - t)
+			err := fmt.Sprintf("s (%d) - t (%d) > maxMatchOff (%d)", s, t, e.maxMatchOff)
+			panic(err)
 		}
 	}
 	s1 := int(s) + maxMatchLength - 4
@@ -650,7 +658,10 @@ func (e *fastEncoder) Reset() {
 		}
 		e.hist = make([]byte, 0, l)
 	}
-	// We offset current position so everything will be out of reach
-	e.cur += e.maxMatchOff + int32(len(e.hist))
+	// We offset current position so everything will be out of reach.
+	// If above reset line, history will be purged.
+	if e.cur < bufferReset {
+		e.cur += e.maxMatchOff + int32(len(e.hist))
+	}
 	e.hist = e.hist[:0]
 }
diff --git a/vendor/github.com/klauspost/compress/zstd/encoder.go b/vendor/github.com/klauspost/compress/zstd/encoder.go
index 366dd66bd..4032fb9fc 100644
--- a/vendor/github.com/klauspost/compress/zstd/encoder.go
+++ b/vendor/github.com/klauspost/compress/zstd/encoder.go
@@ -156,7 +156,7 @@ func (e *Encoder) Write(p []byte) (n int, err error) {
 		if err != nil {
 			return n, err
 		}
-		if debug && len(s.filling) > 0 {
+		if debugAsserts && len(s.filling) > 0 {
 			panic(len(s.filling))
 		}
 	}
diff --git a/vendor/github.com/klauspost/compress/zstd/framedec.go b/vendor/github.com/klauspost/compress/zstd/framedec.go
index 40790747a..cda590b5f 100644
--- a/vendor/github.com/klauspost/compress/zstd/framedec.go
+++ b/vendor/github.com/klauspost/compress/zstd/framedec.go
@@ -50,7 +50,7 @@ type frameDec struct {
 const (
 	// The minimum Window_Size is 1 KB.
 	MinWindowSize = 1 << 10
-	MaxWindowSize = 1 << 30
+	MaxWindowSize = 1 << 29
 )
 
 var (
diff --git a/vendor/github.com/klauspost/compress/zstd/fse_decoder.go b/vendor/github.com/klauspost/compress/zstd/fse_decoder.go
index 9efe34feb..e002be98b 100644
--- a/vendor/github.com/klauspost/compress/zstd/fse_decoder.go
+++ b/vendor/github.com/klauspost/compress/zstd/fse_decoder.go
@@ -118,7 +118,7 @@ func (s *fseDecoder) readNCount(b *byteReader, maxSymbol uint16) error {
 		if int32(bitStream)&(threshold-1) < max {
 			count = int32(bitStream) & (threshold - 1)
-			if debug && nbBits < 1 {
+			if debugAsserts && nbBits < 1 {
 				panic("nbBits underflow")
 			}
 			bitCount += nbBits - 1
diff --git a/vendor/github.com/klauspost/compress/zstd/fse_encoder.go b/vendor/github.com/klauspost/compress/zstd/fse_encoder.go
index 619836f52..aa9eba88b 100644
--- a/vendor/github.com/klauspost/compress/zstd/fse_encoder.go
+++ b/vendor/github.com/klauspost/compress/zstd/fse_encoder.go
@@ -327,7 +327,7 @@ func (s *fseEncoder) normalizeCount(length int) error {
 		if err != nil {
 			return err
 		}
-		if debug {
+		if debugAsserts {
 			err = s.validateNorm()
 			if err != nil {
 				return err
@@ -336,7 +336,7 @@ func (s *fseEncoder) normalizeCount(length int) error {
 		return s.buildCTable()
 	}
 	s.norm[largest] += stillToDistribute
-	if debug {
+	if debugAsserts {
 		err := s.validateNorm()
 		if err != nil {
 			return err
@@ -619,7 +619,7 @@ func (s *fseEncoder) writeCount(out []byte) ([]byte, error) {
 func (s *fseEncoder) bitCost(symbolValue uint8, accuracyLog uint32) uint32 {
 	minNbBits := s.ct.symbolTT[symbolValue].deltaNbBits >> 16
 	threshold := (minNbBits + 1) << 16
-	if debug {
+	if debugAsserts {
 		if !(s.actualTableLog < 16) {
 			panic("!s.actualTableLog < 16")
 		}
@@ -633,7 +633,7 @@ func (s *fseEncoder) bitCost(symbolValue uint8, accuracyLog uint32) uint32 {
 	// linear interpolation (very approximate)
 	normalizedDeltaFromThreshold := (deltaFromThreshold << accuracyLog) >> s.actualTableLog
 	bitMultiplier := uint32(1) << accuracyLog
-	if debug {
+	if debugAsserts {
 		if s.ct.symbolTT[symbolValue].deltaNbBits+tableSize > threshold {
 			panic("s.ct.symbolTT[symbolValue].deltaNbBits+tableSize > threshold")
 		}
diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.s b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.s
index d580e32ae..2c9c5357a 100644
--- a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.s
+++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.s
@@ -179,13 +179,13 @@ TEXT ·writeBlocks(SB), NOSPLIT, $0-40
 	MOVQ ·prime2v(SB), R14
 
 	// Load slice.
-	MOVQ b_base+8(FP), CX
-	MOVQ b_len+16(FP), DX
+	MOVQ arg1_base+8(FP), CX
+	MOVQ arg1_len+16(FP), DX
 	LEAQ (CX)(DX*1), BX
 	SUBQ $32, BX
 
 	// Load vN from d.
-	MOVQ d+0(FP), AX
+	MOVQ arg+0(FP), AX
 	MOVQ 0(AX), R8   // v1
 	MOVQ 8(AX), R9   // v2
 	MOVQ 16(AX), R10 // v3
@@ -209,7 +209,7 @@ blockLoop:
 	MOVQ R11, 24(AX)
 
 	// The number of bytes written is CX minus the old base pointer.
-	SUBQ b_base+8(FP), CX
+	SUBQ arg1_base+8(FP), CX
 	MOVQ CX, ret+32(FP)
 
 	RET
diff --git a/vendor/github.com/klauspost/compress/zstd/zstd.go b/vendor/github.com/klauspost/compress/zstd/zstd.go
index 57a8a2f5b..5e0b64ccc 100644
--- a/vendor/github.com/klauspost/compress/zstd/zstd.go
+++ b/vendor/github.com/klauspost/compress/zstd/zstd.go
@@ -6,11 +6,20 @@ package zstd
 import (
 	"errors"
 	"log"
+	"math"
 	"math/bits"
 )
 
+// enable debug printing
 const debug = false
+
+// Enable extra assertions.
+const debugAsserts = debug || false
+
+// print sequence details
 const debugSequences = false
+
+// print detailed matching information
 const debugMatches = false
 
 // force encoder to use predefined tables.
@@ -19,6 +28,9 @@ const forcePreDef = false
 // zstdMinMatch is the minimum zstd match length.
 const zstdMinMatch = 3
 
+// Reset the buffer offset when reaching this.
+const bufferReset = math.MaxInt32 - MaxWindowSize
+
 var (
 	// ErrReservedBlockType is returned when a reserved block type is found.
 	// Typically this indicates wrong or corrupted input.
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 20a3ac69d..53745057c 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -48,7 +48,7 @@ github.com/containerd/containerd/errdefs
 github.com/containerd/continuity/fs
 github.com/containerd/continuity/syscallx
 github.com/containerd/continuity/sysx
-# github.com/containernetworking/cni v0.7.2-0.20190904153231-83439463f784
+# github.com/containernetworking/cni v0.7.2-0.20200304161608-4fae32b84921
 github.com/containernetworking/cni/libcni
 github.com/containernetworking/cni/pkg/invoke
 github.com/containernetworking/cni/pkg/types
@@ -62,7 +62,7 @@ github.com/containernetworking/plugins/pkg/ns
 github.com/containernetworking/plugins/pkg/utils/hwaddr
 github.com/containernetworking/plugins/plugins/ipam/host-local/backend
 github.com/containernetworking/plugins/plugins/ipam/host-local/backend/allocator
-# github.com/containers/buildah v1.14.1
+# github.com/containers/buildah v1.14.2
 github.com/containers/buildah
 github.com/containers/buildah/bind
 github.com/containers/buildah/chroot
@@ -142,7 +142,7 @@ github.com/containers/psgo/internal/dev
 github.com/containers/psgo/internal/host
 github.com/containers/psgo/internal/proc
 github.com/containers/psgo/internal/process
-# github.com/containers/storage v1.16.0
+# github.com/containers/storage v1.16.1
 github.com/containers/storage
 github.com/containers/storage/drivers
 github.com/containers/storage/drivers/aufs
@@ -314,7 +314,7 @@ github.com/inconshreveable/mousetrap
 github.com/ishidawataru/sctp
 # github.com/json-iterator/go v1.1.9
 github.com/json-iterator/go
-# github.com/klauspost/compress v1.10.0
+# github.com/klauspost/compress v1.10.2
 github.com/klauspost/compress/flate
 github.com/klauspost/compress/fse
 github.com/klauspost/compress/huff0
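The klauspost/compress bump above (v1.10.0 to v1.10.2) changes how the vendored zstd encoders guard against int32 offset wraparound: instead of comparing e.cur against (1<<30)+e.maxMatchOff, they now reset tables once e.cur reaches bufferReset = math.MaxInt32 - MaxWindowSize (with MaxWindowSize lowered to 1<<29), and cheap sanity checks are gated behind a new debugAsserts constant. The following is a minimal, self-contained Go sketch of that idea only, not the vendored encoder; the names table, addBlock, maxWindow, and the reset condition here are illustrative.

// Sketch: clamp a growing int32 position counter before it can overflow,
// and compile assertions away when the gating constant is false.
package main

import (
	"fmt"
	"math"
)

const (
	maxWindow    = 1 << 29                  // mirrors MaxWindowSize in the diff
	bufferReset  = math.MaxInt32 - maxWindow // reset threshold, as in zstd.go
	debugAsserts = false                    // assertions gated at compile time
)

type table struct {
	cur     int32    // current absolute offset
	entries [8]int32 // stand-in for the encoder's match tables
}

// addBlock advances cur by blockLen, wiping state and rewinding cur
// whenever the next addition could push it past bufferReset.
func (t *table) addBlock(blockLen int32) {
	if debugAsserts && t.cur > bufferReset {
		panic(fmt.Sprintf("cur (%d) > bufferReset (%d)", t.cur, bufferReset))
	}
	if t.cur >= bufferReset-blockLen {
		// Offsets stored relative to the old cur are no longer reachable:
		// purge them and restart the counter, like the table wipe in Encode.
		t.entries = [8]int32{}
		t.cur = 0
	}
	t.cur += blockLen
}

func main() {
	t := &table{cur: bufferReset - 10}
	t.addBlock(64) // takes the reset path instead of overflowing int32
	fmt.Println("cur after reset:", t.cur)
}

Flipping debugAsserts to true turns the guard into a hard panic, which mirrors how the vendored code now separates cheap runtime checks (debugAsserts) from verbose debug printing (debug).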