Diffstat
-rw-r--r--  .copr/Makefile | 4
-rw-r--r--  .copr/prepare.sh | 17
-rw-r--r--  .gitignore | 4
-rwxr-xr-x  API.md | 14
-rw-r--r--  Dockerfile | 6
-rw-r--r--  Dockerfile.centos | 4
-rw-r--r--  Dockerfile.fedora | 2
-rw-r--r--  Makefile | 36
-rw-r--r--  cmd/podman/attach.go | 3
-rw-r--r--  cmd/podman/checkpoint.go | 2
-rw-r--r--  cmd/podman/cleanup.go | 2
-rw-r--r--  cmd/podman/cliconfig/config.go | 35
-rw-r--r--  cmd/podman/common.go | 43
-rw-r--r--  cmd/podman/containers_prune.go | 21
-rw-r--r--  cmd/podman/exec.go | 3
-rw-r--r--  cmd/podman/images.go | 10
-rw-r--r--  cmd/podman/images_prune.go | 20
-rw-r--r--  cmd/podman/init.go | 2
-rw-r--r--  cmd/podman/kill.go | 2
-rw-r--r--  cmd/podman/main_local.go | 4
-rw-r--r--  cmd/podman/mount.go | 2
-rw-r--r--  cmd/podman/pod_kill.go | 2
-rw-r--r--  cmd/podman/pod_pause.go | 2
-rw-r--r--  cmd/podman/pod_restart.go | 2
-rw-r--r--  cmd/podman/pod_rm.go | 6
-rw-r--r--  cmd/podman/pod_start.go | 2
-rw-r--r--  cmd/podman/pod_stop.go | 4
-rw-r--r--  cmd/podman/pod_unpause.go | 2
-rw-r--r--  cmd/podman/pods_prune.go | 4
-rw-r--r--  cmd/podman/port.go | 2
-rw-r--r--  cmd/podman/reset.go | 71
-rw-r--r--  cmd/podman/restart.go | 2
-rw-r--r--  cmd/podman/restore.go | 2
-rw-r--r--  cmd/podman/rm.go | 14
-rw-r--r--  cmd/podman/shared/container.go | 24
-rw-r--r--  cmd/podman/shared/create.go | 50
-rw-r--r--  cmd/podman/start.go | 3
-rw-r--r--  cmd/podman/stop.go | 6
-rw-r--r--  cmd/podman/system.go | 1
-rw-r--r--  cmd/podman/system_prune.go | 6
-rw-r--r--  cmd/podman/system_renumber.go | 4
-rw-r--r--  cmd/podman/umount.go | 2
-rw-r--r--  cmd/podman/varlink/io.podman.varlink | 9
-rw-r--r--  cmd/podman/version.go | 66
-rw-r--r--  commands.md | 172
-rw-r--r--  completions/bash/podman | 30
-rwxr-xr-x [-rw-r--r--]  contrib/build_rpm.sh | 43
-rw-r--r--  contrib/spec/podman.spec.in | 44
-rw-r--r--  docs/source/markdown/podman-container-prune.1.md | 22
-rw-r--r--  docs/source/markdown/podman-create.1.md | 23
-rw-r--r--  docs/source/markdown/podman-image-prune.1.md | 41
-rw-r--r--  docs/source/markdown/podman-images.1.md | 4
-rw-r--r--  docs/source/markdown/podman-pod-prune.1.md | 11
-rw-r--r--  docs/source/markdown/podman-pod-rm.1.md | 10
-rw-r--r--  docs/source/markdown/podman-pod-stop.1.md | 6
-rw-r--r--  docs/source/markdown/podman-pod.1.md | 4
-rw-r--r--  docs/source/markdown/podman-rm.1.md | 16
-rw-r--r--  docs/source/markdown/podman-run.1.md | 23
-rw-r--r--  docs/source/markdown/podman-stop.1.md | 14
-rw-r--r--  docs/source/markdown/podman-system-reset.1.md | 25
-rw-r--r--  docs/source/markdown/podman-system.1.md | 5
-rw-r--r--  docs/source/markdown/podman-version.1.md | 4
-rw-r--r--  docs/source/markdown/podman.1.md | 39
-rw-r--r--  docs/source/pod.rst | 4
-rw-r--r--  go.mod | 10
-rw-r--r--  go.sum | 20
-rwxr-xr-x  hack/get_release_info.sh | 5
-rw-r--r--  install.md | 6
-rw-r--r--  libpod/boltdb_state_linux.go | 8
-rw-r--r--  libpod/config/config.go | 19
-rw-r--r--  libpod/container.go | 12
-rw-r--r--  libpod/container_api.go | 17
-rw-r--r--  libpod/container_commit.go | 169
-rw-r--r--  libpod/container_internal.go | 10
-rw-r--r--  libpod/container_internal_linux.go | 37
-rw-r--r--  libpod/define/containerstate.go | 7
-rw-r--r--  libpod/image/image.go | 150
-rw-r--r--  libpod/image/prune.go | 79
-rw-r--r--  libpod/networking_linux.go | 34
-rw-r--r--  libpod/oci_util.go | 19
-rw-r--r--  libpod/options.go | 92
-rw-r--r--  libpod/reset.go | 107
-rw-r--r--  libpod/runtime.go | 18
-rw-r--r--  libpod/runtime_ctr.go | 55
-rw-r--r--  libpod/runtime_img.go | 3
-rw-r--r--  libpod/runtime_migrate_unsupported.go | 4
-rw-r--r--  libpod/util.go | 25
-rw-r--r--  libpod/volume_internal.go | 12
-rw-r--r--  pkg/adapter/containers.go | 113
-rw-r--r--  pkg/adapter/containers_remote.go | 2
-rw-r--r--  pkg/adapter/pods.go | 109
-rw-r--r--  pkg/adapter/reset.go | 13
-rw-r--r--  pkg/adapter/reset_remote.go | 12
-rw-r--r--  pkg/adapter/runtime.go | 6
-rw-r--r--  pkg/adapter/runtime_remote.go | 71
-rw-r--r--  pkg/adapter/shortcuts/shortcuts.go | 32
-rw-r--r--  pkg/rootless/rootless_linux.c | 33
-rw-r--r--  pkg/timetype/timestamp.go | 131
-rw-r--r--  pkg/timetype/timestamp_test.go | 95
-rw-r--r--  pkg/util/utils.go | 310
-rw-r--r--  pkg/util/utils_supported.go | 4
-rw-r--r--  pkg/util/utils_test.go | 308
-rw-r--r--  pkg/varlinkapi/images.go | 12
-rw-r--r--  pkg/varlinkapi/pods.go | 2
-rw-r--r--  pkg/varlinkapi/system.go | 22
-rw-r--r--  rootless.md | 2
-rw-r--r--  test/e2e/build/basicalpine/Containerfile | 1
-rw-r--r--  test/e2e/build/context_dir_a_file | 0
-rw-r--r--  test/e2e/build/squash/Dockerfile.squash-a | 2
-rw-r--r--  test/e2e/build/squash/Dockerfile.squash-b | 2
-rw-r--r--  test/e2e/build/squash/Dockerfile.squash-c | 3
-rw-r--r--  test/e2e/build/squash/alpinetest.tgz | bin 0 -> 332 bytes
-rw-r--r--  test/e2e/build_test.go | 108
-rw-r--r--  test/e2e/checkpoint_test.go | 43
-rw-r--r--  test/e2e/common_test.go | 9
-rw-r--r--  test/e2e/images_test.go | 1
-rw-r--r--  test/e2e/import_test.go | 8
-rw-r--r--  test/e2e/libpod_suite_test.go | 4
-rw-r--r--  test/e2e/play_kube_test.go | 71
-rw-r--r--  test/e2e/pod_prune_test.go | 23
-rw-r--r--  test/e2e/pod_rm_test.go | 51
-rw-r--r--  test/e2e/pod_stats_test.go | 10
-rw-r--r--  test/e2e/pod_stop_test.go | 40
-rw-r--r--  test/e2e/prune_test.go | 17
-rw-r--r--  test/e2e/pull_test.go | 6
-rw-r--r--  test/e2e/rm_test.go | 92
-rw-r--r--  test/e2e/run_networking_test.go | 14
-rw-r--r--  test/e2e/run_test.go | 4
-rw-r--r--  test/e2e/stats_test.go | 9
-rw-r--r--  test/e2e/stop_test.go | 91
-rw-r--r--  test/e2e/system_reset_test.go | 83
-rw-r--r--  test/e2e/version_test.go | 16
-rw-r--r--  test/system/010-images.bats | 15
-rw-r--r--  test/system/070-build.bats | 48
-rw-r--r--  troubleshooting.md | 15
-rw-r--r--  vendor/github.com/Microsoft/hcsshim/appveyor.yml | 2
-rw-r--r--  vendor/github.com/Microsoft/hcsshim/container.go | 2
-rw-r--r--  vendor/github.com/Microsoft/hcsshim/go.mod | 2
-rw-r--r--  vendor/github.com/Microsoft/hcsshim/internal/hcs/process.go | 26
-rw-r--r--  vendor/github.com/Microsoft/hcsshim/internal/hcs/system.go | 33
-rw-r--r--  vendor/github.com/Microsoft/hcsshim/osversion/windowsbuilds.go | 2
-rw-r--r--  vendor/github.com/containers/buildah/CHANGELOG.md | 40
-rw-r--r--  vendor/github.com/containers/buildah/add.go | 17
-rw-r--r--  vendor/github.com/containers/buildah/buildah.go | 4
-rw-r--r--  vendor/github.com/containers/buildah/changelog.txt | 13
-rw-r--r--  vendor/github.com/containers/buildah/commit.go | 7
-rw-r--r--  vendor/github.com/containers/buildah/go.mod | 8
-rw-r--r--  vendor/github.com/containers/buildah/go.sum | 36
-rw-r--r--  vendor/github.com/containers/buildah/image.go | 5
-rw-r--r--  vendor/github.com/containers/buildah/imagebuildah/stage_executor.go | 74
-rw-r--r--  vendor/github.com/containers/buildah/info.go | 4
-rw-r--r--  vendor/github.com/containers/buildah/install.md | 2
-rw-r--r--  vendor/github.com/containers/buildah/pkg/cli/common.go | 2
-rw-r--r--  vendor/github.com/containers/buildah/pkg/parse/parse.go | 2
-rw-r--r--  vendor/github.com/containers/storage/.cirrus.yml | 14
-rw-r--r--  vendor/github.com/containers/storage/.gitignore | 1
-rw-r--r--  vendor/github.com/containers/storage/.travis.yml | 15
-rw-r--r--  vendor/github.com/containers/storage/Makefile | 3
-rw-r--r--  vendor/github.com/containers/storage/VERSION | 2
-rw-r--r--  vendor/github.com/containers/storage/drivers/copy/copy_linux.go | 18
-rw-r--r--  vendor/github.com/containers/storage/drivers/driver_linux.go | 2
-rw-r--r--  vendor/github.com/containers/storage/drivers/overlay/overlay.go | 42
-rw-r--r--  vendor/github.com/containers/storage/drivers/vfs/copy_linux.go | 2
-rw-r--r--  vendor/github.com/containers/storage/go.mod | 23
-rw-r--r--  vendor/github.com/containers/storage/go.sum | 142
-rw-r--r--  vendor/github.com/containers/storage/images.go | 11
-rw-r--r--  vendor/github.com/containers/storage/images_ffjson.go | 108
-rw-r--r--  vendor/github.com/containers/storage/pkg/archive/archive.go | 45
-rw-r--r--  vendor/github.com/containers/storage/pkg/archive/archive_linux.go | 13
-rw-r--r--  vendor/github.com/containers/storage/pkg/archive/changes.go | 5
-rw-r--r--  vendor/github.com/containers/storage/pkg/archive/changes_linux.go | 44
-rw-r--r--  vendor/github.com/containers/storage/pkg/archive/copy.go | 3
-rw-r--r--  vendor/github.com/containers/storage/pkg/archive/diff.go | 2
-rw-r--r--  vendor/github.com/containers/storage/pkg/chrootarchive/archive_unix.go | 2
-rw-r--r--  vendor/github.com/containers/storage/pkg/config/config.go | 194
-rw-r--r--  vendor/github.com/containers/storage/pkg/fileutils/fileutils.go | 82
-rw-r--r--  vendor/github.com/containers/storage/pkg/idtools/idtools_unix.go | 2
-rw-r--r--  vendor/github.com/containers/storage/pkg/idtools/idtools_windows.go | 4
-rw-r--r--  vendor/github.com/containers/storage/pkg/loopback/attach_loopback.go | 2
-rw-r--r--  vendor/github.com/containers/storage/pkg/loopback/loopback.go | 2
-rw-r--r--  vendor/github.com/containers/storage/pkg/parsers/kernel/kernel_windows.go | 2
-rw-r--r--  vendor/github.com/containers/storage/pkg/system/filesys.go | 67
-rw-r--r--  vendor/github.com/containers/storage/pkg/system/filesys_windows.go | 298
-rw-r--r--  vendor/github.com/containers/storage/pkg/system/stat_linux.go | 2
-rw-r--r--  vendor/github.com/containers/storage/pkg/system/xattrs_linux.go | 65
-rw-r--r--  vendor/github.com/containers/storage/pkg/system/xattrs_unsupported.go | 15
-rw-r--r--  vendor/github.com/containers/storage/storage.conf | 41
-rw-r--r--  vendor/github.com/containers/storage/store.go | 57
-rw-r--r--  vendor/github.com/containers/storage/utils.go | 12
-rw-r--r--  vendor/github.com/klauspost/compress/flate/crc32_amd64.go | 42
-rw-r--r--  vendor/github.com/klauspost/compress/flate/crc32_amd64.s | 214
-rw-r--r--  vendor/github.com/klauspost/compress/flate/crc32_noasm.go | 35
-rw-r--r--  vendor/github.com/klauspost/compress/flate/deflate.go | 678
-rw-r--r--  vendor/github.com/klauspost/compress/flate/fast_encoder.go | 257
-rw-r--r--  vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go | 506
-rw-r--r--  vendor/github.com/klauspost/compress/flate/huffman_code.go | 70
-rw-r--r--  vendor/github.com/klauspost/compress/flate/inflate.go | 71
-rw-r--r--  vendor/github.com/klauspost/compress/flate/level1.go | 174
-rw-r--r--  vendor/github.com/klauspost/compress/flate/level2.go | 199
-rw-r--r--  vendor/github.com/klauspost/compress/flate/level3.go | 225
-rw-r--r--  vendor/github.com/klauspost/compress/flate/level4.go | 210
-rw-r--r--  vendor/github.com/klauspost/compress/flate/level5.go | 276
-rw-r--r--  vendor/github.com/klauspost/compress/flate/level6.go | 279
-rw-r--r--  vendor/github.com/klauspost/compress/flate/reverse_bits.go | 48
-rw-r--r--  vendor/github.com/klauspost/compress/flate/snappy.go | 900
-rw-r--r--  vendor/github.com/klauspost/compress/flate/stateless.go | 266
-rw-r--r--  vendor/github.com/klauspost/compress/flate/token.go | 285
-rw-r--r--  vendor/github.com/klauspost/compress/huff0/compress.go | 17
-rw-r--r--  vendor/github.com/klauspost/compress/huff0/decompress.go | 128
-rw-r--r--  vendor/github.com/klauspost/compress/huff0/huff0.go | 6
-rw-r--r--  vendor/github.com/klauspost/compress/zstd/README.md | 14
-rw-r--r--  vendor/github.com/klauspost/compress/zstd/blockdec.go | 15
-rw-r--r--  vendor/github.com/klauspost/compress/zstd/blockenc.go | 68
-rw-r--r--  vendor/github.com/klauspost/compress/zstd/bytebuf.go | 3
-rw-r--r--  vendor/github.com/klauspost/compress/zstd/decoder.go | 26
-rw-r--r--  vendor/github.com/klauspost/compress/zstd/enc_dfast.go | 6
-rw-r--r--  vendor/github.com/klauspost/compress/zstd/encoder.go | 26
-rw-r--r--  vendor/github.com/klauspost/compress/zstd/encoder_options.go | 47
-rw-r--r--  vendor/github.com/klauspost/compress/zstd/framedec.go | 24
-rw-r--r--  vendor/github.com/klauspost/compress/zstd/frameenc.go | 5
-rw-r--r--  vendor/github.com/klauspost/compress/zstd/fse_encoder.go | 27
-rw-r--r--  vendor/github.com/klauspost/compress/zstd/fse_predefined.go | 159
-rw-r--r--  vendor/github.com/klauspost/compress/zstd/snappy.go | 9
-rw-r--r--  vendor/github.com/klauspost/compress/zstd/zstd.go | 1
-rw-r--r--  vendor/github.com/klauspost/cpuid/.gitignore | 24
-rw-r--r--  vendor/github.com/klauspost/cpuid/.travis.yml | 23
-rw-r--r--  vendor/github.com/klauspost/cpuid/CONTRIBUTING.txt | 35
-rw-r--r--  vendor/github.com/klauspost/cpuid/LICENSE | 22
-rw-r--r--  vendor/github.com/klauspost/cpuid/README.md | 147
-rw-r--r--  vendor/github.com/klauspost/cpuid/cpuid.go | 1049
-rw-r--r--  vendor/github.com/klauspost/cpuid/cpuid_386.s | 42
-rw-r--r--  vendor/github.com/klauspost/cpuid/cpuid_amd64.s | 42
-rw-r--r--  vendor/github.com/klauspost/cpuid/detect_intel.go | 17
-rw-r--r--  vendor/github.com/klauspost/cpuid/detect_ref.go | 23
-rw-r--r--  vendor/github.com/klauspost/cpuid/generate.go | 4
-rw-r--r--  vendor/github.com/opencontainers/runc/libcontainer/apparmor/apparmor.go | 10
-rw-r--r--  vendor/github.com/opencontainers/runc/libcontainer/cgroups/utils.go | 58
-rw-r--r--  vendor/github.com/opencontainers/runc/libcontainer/configs/blkio_device.go | 5
-rw-r--r--  vendor/github.com/opencontainers/runc/libcontainer/configs/cgroup_linux.go | 8
-rw-r--r--  vendor/github.com/opencontainers/runc/libcontainer/configs/config.go | 1
-rw-r--r--  vendor/github.com/opencontainers/runc/libcontainer/system/syscall_linux_64.go | 2
-rw-r--r--  vendor/github.com/opencontainers/runc/libcontainer/utils/cmsg.go | 93
-rw-r--r--  vendor/github.com/opencontainers/runc/libcontainer/utils/utils.go | 112
-rw-r--r--  vendor/github.com/opencontainers/runc/libcontainer/utils/utils_unix.go | 68
-rw-r--r--  vendor/github.com/pkg/profile/.travis.yml | 2
-rw-r--r--  vendor/github.com/pkg/profile/go.mod | 3
-rw-r--r--  vendor/github.com/pkg/profile/mutex.go | 13
-rw-r--r--  vendor/github.com/pkg/profile/mutex17.go | 9
-rw-r--r--  vendor/github.com/pkg/profile/profile.go | 29
-rw-r--r--  vendor/github.com/pkg/profile/trace.go | 8
-rw-r--r--  vendor/github.com/pkg/profile/trace16.go | 10
-rw-r--r--  vendor/gopkg.in/yaml.v2/.travis.yml | 18
-rw-r--r--  vendor/gopkg.in/yaml.v2/scannerc.go | 76
-rw-r--r--  vendor/gopkg.in/yaml.v2/yaml.go | 2
-rw-r--r--  vendor/modules.txt | 17
255 files changed, 7704 insertions, 5250 deletions
diff --git a/.copr/Makefile b/.copr/Makefile
index 71142920b..465a52b15 100644
--- a/.copr/Makefile
+++ b/.copr/Makefile
@@ -8,11 +8,11 @@ SHORT_COMMIT ?= $(shell git rev-parse --short=8 HEAD)
srpm:
mkdir -p $(topdir)
sh $(current_dir)/prepare.sh
- rpmbuild -bs -D "dist %{nil}" -D "_sourcedir build/" -D "_srcrpmdir $(outdir)" -D "_topdir $(topdir)" --nodeps contrib/spec/podman.spec
+ rpmbuild -bs -D "dist %{nil}" -D "_sourcedir build/" -D "_srcrpmdir $(outdir)" -D "_topdir $(topdir)" --nodeps ${extra_arg:-""} contrib/spec/podman.spec
build_binary:
mkdir -p $(topdir)
- rpmbuild --rebuild -D "_rpmdir $(outdir)" -D "_topdir $(topdir)" $(outdir)/podman-*.git$(SHORT_COMMIT).src.rpm
+ rpmbuild --rebuild -D "_rpmdir $(outdir)" -D "_topdir $(topdir)" ${extra_arg:-""} $(outdir)/podman-*.git$(SHORT_COMMIT).src.rpm
clean:
rm -fr rpms
diff --git a/.copr/prepare.sh b/.copr/prepare.sh
index d8ad34d08..d5a9a7f85 100644
--- a/.copr/prepare.sh
+++ b/.copr/prepare.sh
@@ -1,12 +1,14 @@
#!/bin/sh -euf
-set -x
+set -euxo pipefail
OS_TEST=${OS_TEST:=0}
if [ ! -e /usr/bin/git ]; then
dnf -y install git-core
fi
-git fetch --unshallow || :
+if [ -f $(git rev-parse --git-dir)/shallow ]; then
+ git fetch --unshallow
+fi
COMMIT=$(git rev-parse HEAD)
COMMIT_SHORT=$(git rev-parse --short=8 HEAD)
@@ -26,7 +28,12 @@ if [ ${OS_TEST} -eq 0 ]; then
sed -i "s/${BR}/${NEWBR}/g" contrib/spec/podman.spec
fi
-mkdir build/
+mkdir -p build/
git archive --prefix "libpod-${COMMIT_SHORT}/" --format "tar.gz" HEAD -o "build/libpod-${COMMIT_SHORT}.tar.gz"
-git clone https://github.com/containers/conmon
-cd conmon && git checkout 6f3572558b97bc60dd8f8c7f0807748e6ce2c440 && git archive --prefix "conmon/" --format "tar.gz" HEAD -o "../build/conmon.tar.gz"
+if [ ! -d conmon ]; then
+ git clone -n --quiet https://github.com/containers/conmon
+fi
+pushd conmon
+git checkout --detach 6f3572558b97bc60dd8f8c7f0807748e6ce2c440
+git archive --prefix "conmon/" --format "tar.gz" HEAD -o "../build/conmon.tar.gz"
+popd
diff --git a/.gitignore b/.gitignore
index 54b63518f..f14f08396 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,7 +1,7 @@
/.artifacts/
/_output/
/brew
-/conmon/conmon.o
+/conmon/
/docs/*.[158]
/docs/*.[158].gz
/docs/remote
@@ -27,3 +27,5 @@ podman-remote*.zip
podman*.tar.gz
.idea*
.vscode*
+contrib/spec/podman.spec
+*.rpm
diff --git a/API.md b/API.md
index d96ea6cd0..3a66db83b 100755
--- a/API.md
+++ b/API.md
@@ -95,7 +95,7 @@ in the [API.md](https://github.com/containers/libpod/blob/master/API.md) file in
[func ImageSave(options: ImageSaveOptions) MoreResponse](#ImageSave)
-[func ImagesPrune(all: bool) []string](#ImagesPrune)
+[func ImagesPrune(all: bool, filter: []string) []string](#ImagesPrune)
[func ImportImage(source: string, reference: string, message: string, changes: []string, delete: bool) string](#ImportImage)
@@ -149,6 +149,8 @@ in the [API.md](https://github.com/containers/libpod/blob/master/API.md) file in
[func RemovePod(name: string, force: bool) string](#RemovePod)
+[func Reset() ](#Reset)
+
[func RestartContainer(name: string, timeout: int) string](#RestartContainer)
[func RestartPod(name: string) string](#RestartPod)
@@ -766,7 +768,7 @@ ImageSave allows you to save an image from the local image storage to a tarball
### <a name="ImagesPrune"></a>func ImagesPrune
<div style="background-color: #E8E8E8; padding: 15px; margin: 10px; border-radius: 10px;">
-method ImagesPrune(all: [bool](https://godoc.org/builtin#bool)) [[]string](#[]string)</div>
+method ImagesPrune(all: [bool](https://godoc.org/builtin#bool), filter: [[]string](#[]string)) [[]string](#[]string)</div>
ImagesPrune removes all unused images from the local store. Upon successful pruning,
the IDs of the removed images are returned.
### <a name="ImportImage"></a>func ImportImage
@@ -1059,6 +1061,12 @@ $ varlink call -m unix:/run/podman/io.podman/io.podman.RemovePod '{"name": "62f4
"pod": "62f4fd98cb57f529831e8f90610e54bba74bd6f02920ffb485e15376ed365c20"
}
~~~
+### <a name="Reset"></a>func Reset
+<div style="background-color: #E8E8E8; padding: 15px; margin: 10px; border-radius: 10px;">
+
+method Reset() </div>
+Reset resets Podman back to its initial state.
+Removes all Pods, Containers, Images and Volumes
### <a name="RestartContainer"></a>func RestartContainer
<div style="background-color: #E8E8E8; padding: 15px; margin: 10px; border-radius: 10px;">
@@ -1698,6 +1706,8 @@ isParent [bool](https://godoc.org/builtin#bool)
topLayer [string](https://godoc.org/builtin#string)
readOnly [bool](https://godoc.org/builtin#bool)
+
+history [[]string](#[]string)
### <a name="ImageHistory"></a>type ImageHistory
ImageHistory describes the returned structure from ImageHistory.
diff --git a/Dockerfile b/Dockerfile
index a7d795b29..f85c47937 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -50,7 +50,7 @@ RUN set -x \
&& git clone https://github.com/opencontainers/runc.git "$GOPATH/src/github.com/opencontainers/runc" \
&& cd "$GOPATH/src/github.com/opencontainers/runc" \
&& git fetch origin --tags \
- && git checkout -q "$RUNC_COMMIT" \
+ && git checkout --detach -q "$RUNC_COMMIT" \
&& make static BUILDTAGS="seccomp selinux" \
&& cp runc /usr/bin/runc \
&& rm -rf "$GOPATH"
@@ -62,7 +62,7 @@ RUN set -x \
&& git clone https://github.com/containers/conmon.git "$GOPATH/src/github.com/containers/conmon.git" \
&& cd "$GOPATH/src/github.com/containers/conmon.git" \
&& git fetch origin --tags \
- && git checkout -q "$CONMON_COMMIT" \
+ && git checkout --detach -q "$CONMON_COMMIT" \
&& make \
&& install -D -m 755 bin/conmon /usr/libexec/podman/conmon \
&& rm -rf "$GOPATH"
@@ -73,7 +73,7 @@ RUN set -x \
&& export GOPATH="$(mktemp -d)" GOCACHE="$(mktemp -d)" \
&& git clone https://github.com/containernetworking/plugins.git "$GOPATH/src/github.com/containernetworking/plugins" \
&& cd "$GOPATH/src/github.com/containernetworking/plugins" \
- && git checkout -q "$CNI_COMMIT" \
+ && git checkout --detach -q "$CNI_COMMIT" \
&& ./build_linux.sh \
&& mkdir -p /usr/libexec/cni \
&& cp bin/* /usr/libexec/cni \
diff --git a/Dockerfile.centos b/Dockerfile.centos
index 7c2a05dd1..f5a2b891c 100644
--- a/Dockerfile.centos
+++ b/Dockerfile.centos
@@ -35,7 +35,7 @@ RUN set -x \
&& export GOPATH="$(mktemp -d)" GOCACHE="$(mktemp -d)" \
&& git clone https://github.com/containernetworking/plugins.git "$GOPATH/src/github.com/containernetworking/plugins" \
&& cd "$GOPATH/src/github.com/containernetworking/plugins" \
- && git checkout -q "$CNI_COMMIT" \
+ && git checkout --detach -q "$CNI_COMMIT" \
&& ./build_linux.sh \
&& mkdir -p /usr/libexec/cni \
&& cp bin/* /usr/libexec/cni \
@@ -59,7 +59,7 @@ RUN set -x \
&& git clone https://github.com/containers/conmon.git "$GOPATH/src/github.com/containers/conmon.git" \
&& cd "$GOPATH/src/github.com/containers/conmon.git" \
&& git fetch origin --tags \
- && git checkout -q "$CONMON_COMMIT" \
+ && git checkout --detach -q "$CONMON_COMMIT" \
&& make \
&& install -D -m 755 bin/conmon /usr/libexec/podman/conmon \
&& rm -rf "$GOPATH"
diff --git a/Dockerfile.fedora b/Dockerfile.fedora
index ef9a718dc..45b2c3670 100644
--- a/Dockerfile.fedora
+++ b/Dockerfile.fedora
@@ -52,7 +52,7 @@ RUN set -x \
&& git clone https://github.com/containers/conmon.git "$GOPATH/src/github.com/containers/conmon.git" \
&& cd "$GOPATH/src/github.com/containers/conmon.git" \
&& git fetch origin --tags \
- && git checkout -q "$CONMON_COMMIT" \
+ && git checkout --detach -q "$CONMON_COMMIT" \
&& make \
&& install -D -m 755 bin/conmon /usr/libexec/podman/conmon \
&& rm -rf "$GOPATH"
diff --git a/Makefile b/Makefile
index b3566cd1e..3f8820780 100644
--- a/Makefile
+++ b/Makefile
@@ -32,6 +32,9 @@ BUILDTAGS ?= \
exclude_graphdriver_devicemapper \
seccomp \
varlink
+PYTHON ?= $(shell command -v python python3|head -n1)
+PKG_MANAGER ?= $(shell command -v dnf yum|head -n1)
+
GO_BUILD=$(GO) build
# Go module support: set `-mod=vendor` to use the vendored sources
@@ -133,7 +136,7 @@ endef
export PRINT_HELP_PYSCRIPT
help:
- @python -c "$$PRINT_HELP_PYSCRIPT" < $(MAKEFILE_LIST)
+ @$(PYTHON) -c "$$PRINT_HELP_PYSCRIPT" < $(MAKEFILE_LIST)
.gopathok:
ifeq ("$(wildcard $(GOPKGDIR))","")
@@ -317,7 +320,7 @@ $(MANPAGES): %: %.md .gopathok
docdir:
mkdir -p docs/build/man
-docs: docdir $(MANPAGES) ## Generate documentation
+docs: .install.md2man docdir $(MANPAGES) ## Generate documentation
install-podman-remote-%-docs: podman-remote docs $(MANPAGES)
rm -rf docs/build/remote
@@ -475,7 +478,7 @@ endef
if [ ! -x "$(GOBIN)/gometalinter" ]; then \
$(call go-get,github.com/alecthomas/gometalinter); \
cd $(FIRST_GOPATH)/src/github.com/alecthomas/gometalinter; \
- git checkout e8d801238da6f0dfd14078d68f9b53fa50a7eeb5; \
+ git checkout --detach e8d801238da6f0dfd14078d68f9b53fa50a7eeb5; \
$(GO) install github.com/alecthomas/gometalinter; \
$(GOBIN)/gometalinter --install; \
fi
@@ -486,8 +489,8 @@ endef
fi
.install.md2man: .gopathok
- if [ ! -x "$(GOBIN)/go-md2man" ]; then \
- $(call go-get,github.com/cpuguy83/go-md2man); \
+ if [ ! -x "$(GOMD2MAN)" ]; then \
+ $(call go-get,github.com/cpuguy83/go-md2man); \
fi
varlink_generate: .gopathok cmd/podman/varlink/iopodman.go ## Generate varlink
@@ -497,7 +500,7 @@ varlink_api_generate: .gopathok API.md
install.libseccomp.sudo:
rm -rf ../../seccomp/libseccomp
git clone https://github.com/seccomp/libseccomp ../../seccomp/libseccomp
- cd ../../seccomp/libseccomp && git checkout $(LIBSECCOMP_COMMIT) && ./autogen.sh && ./configure --prefix=/usr && make all && make install
+ cd ../../seccomp/libseccomp && git checkout --detach $(LIBSECCOMP_COMMIT) && ./autogen.sh && ./configure --prefix=/usr && make all && make install
cmd/podman/varlink/iopodman.go: cmd/podman/varlink/io.podman.varlink
@@ -531,19 +534,30 @@ vendor-in-container:
.PHONY: \
.gopathok \
binaries \
+ changelog \
clean \
- validate.completions \
default \
docs \
gofmt \
+ golangci-lint \
help \
install \
- golangci-lint \
+ install.libseccomp.sudo \
lint \
pause \
- uninstall \
+ package \
+ package-install \
shell \
- changelog \
+ uninstall \
validate \
- install.libseccomp.sudo \
+ validate.completions \
vendor
+
+package: ## Build rpm packages
+ ## TODO(ssbarnea): make version number predictable, it should not change
+ ## on each execution, producing duplicates.
+ rm -f ~/rpmbuild/RPMS/x86_64/* ~/rpmbuild/RPMS/noarch/*
+ ./contrib/build_rpm.sh
+
+package-install: package ## Install rpm packages
+ sudo ${PKG_MANAGER} -y install --allowerasing ${HOME}/rpmbuild/RPMS/*/*.rpm
diff --git a/cmd/podman/attach.go b/cmd/podman/attach.go
index eeedea8c8..7d32c57af 100644
--- a/cmd/podman/attach.go
+++ b/cmd/podman/attach.go
@@ -33,6 +33,9 @@ func init() {
attachCommand.SetUsageTemplate(UsageTemplate())
flags := attachCommand.Flags()
flags.StringVar(&attachCommand.DetachKeys, "detach-keys", define.DefaultDetachKeys, "Select the key sequence for detaching a container. Format is a single character `[a-Z]` or a comma separated sequence of `ctrl-<value>`, where `<value>` is one of: `a-z`, `@`, `^`, `[`, `\\`, `]`, `^` or `_`")
+ // Clear the default, the value specified in the config file should have the
+ // priority
+ attachCommand.DetachKeys = ""
flags.BoolVar(&attachCommand.NoStdin, "no-stdin", false, "Do not attach STDIN. The default is false")
flags.BoolVar(&attachCommand.SigProxy, "sig-proxy", true, "Proxy received signals to the process")
flags.BoolVarP(&attachCommand.Latest, "latest", "l", false, "Act on the latest container podman is aware of")
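A note on the default-clearing pattern introduced here (and again in exec.go and start.go below): pflag copies the flag's default into the bound variable at registration time, so the variable is emptied immediately afterwards. An empty string then means "not given on the command line", which lets a detach-keys value from the config file take priority. A minimal, self-contained sketch of the same idea; the config-file value here is hypothetical:

    package main

    import (
        "fmt"

        "github.com/spf13/cobra"
    )

    func main() {
        var detachKeys string
        cmd := &cobra.Command{
            Use: "demo",
            Run: func(c *cobra.Command, _ []string) {
                if detachKeys == "" {
                    // Nothing on the command line: fall back to the
                    // config-file value (hypothetical here).
                    detachKeys = "ctrl-p,ctrl-q"
                }
                fmt.Println("detach keys:", detachKeys)
            },
        }
        // StringVar copies the default into detachKeys right here ...
        cmd.Flags().StringVar(&detachKeys, "detach-keys", "ctrl-p,ctrl-q", "detach key sequence")
        // ... so it must be cleared afterwards for the fallback to be detectable.
        detachKeys = ""
        _ = cmd.Execute()
    }
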
diff --git a/cmd/podman/checkpoint.go b/cmd/podman/checkpoint.go
index 22cdb1f39..07db519f8 100644
--- a/cmd/podman/checkpoint.go
+++ b/cmd/podman/checkpoint.go
@@ -26,7 +26,7 @@ var (
return checkpointCmd(&checkpointCommand)
},
Args: func(cmd *cobra.Command, args []string) error {
- return checkAllAndLatest(cmd, args, false)
+ return checkAllLatestAndCIDFile(cmd, args, false, false)
},
Example: `podman container checkpoint --keep ctrID
podman container checkpoint --all
diff --git a/cmd/podman/cleanup.go b/cmd/podman/cleanup.go
index c00654162..a8bc0c116 100644
--- a/cmd/podman/cleanup.go
+++ b/cmd/podman/cleanup.go
@@ -27,7 +27,7 @@ var (
return cleanupCmd(&cleanupCommand)
},
Args: func(cmd *cobra.Command, args []string) error {
- return checkAllAndLatest(cmd, args, false)
+ return checkAllLatestAndCIDFile(cmd, args, false, false)
},
Example: `podman container cleanup --latest
podman container cleanup ctrID1 ctrID2 ctrID3
diff --git a/cmd/podman/cliconfig/config.go b/cmd/podman/cliconfig/config.go
index 857c49a48..431d5e087 100644
--- a/cmd/podman/cliconfig/config.go
+++ b/cmd/podman/cliconfig/config.go
@@ -64,6 +64,7 @@ type ImagesValues struct {
NoTrunc bool
Quiet bool
Sort string
+ History bool
}
type EventValues struct {
@@ -175,12 +176,15 @@ type HistoryValues struct {
}
type PruneImagesValues struct {
PodmanCommand
- All bool
+ All bool
+ Force bool
+ Filter []string
}
type PruneContainersValues struct {
PodmanCommand
- Force bool
+ Force bool
+ Filter []string
}
type PodPruneValues struct {
@@ -367,6 +371,7 @@ type PodRestartValues struct {
type PodRmValues struct {
PodmanCommand
All bool
+ Ignore bool
Force bool
Latest bool
}
@@ -388,6 +393,7 @@ type PodStatsValues struct {
type PodStopValues struct {
PodmanCommand
All bool
+ Ignore bool
Latest bool
Timeout uint
}
@@ -481,11 +487,13 @@ type RestoreValues struct {
type RmValues struct {
PodmanCommand
- All bool
- Force bool
- Latest bool
- Storage bool
- Volumes bool
+ All bool
+ Force bool
+ Ignore bool
+ Latest bool
+ Storage bool
+ Volumes bool
+ CIDFiles []string
}
type RmiValues struct {
@@ -558,9 +566,11 @@ type StatsValues struct {
type StopValues struct {
PodmanCommand
- All bool
- Latest bool
- Timeout uint
+ All bool
+ Ignore bool
+ Latest bool
+ Timeout uint
+ CIDFiles []string
}
type TopValues struct {
@@ -650,6 +660,11 @@ type SystemPruneValues struct {
Volume bool
}
+type SystemResetValues struct {
+ PodmanCommand
+ Force bool
+}
+
type SystemRenumberValues struct {
PodmanCommand
}
diff --git a/cmd/podman/common.go b/cmd/podman/common.go
index 4db043f31..69365201e 100644
--- a/cmd/podman/common.go
+++ b/cmd/podman/common.go
@@ -39,24 +39,45 @@ func shortID(id string) string {
return id
}
-// checkAllAndLatest checks that --all and --latest are used correctly
-func checkAllAndLatest(c *cobra.Command, args []string, ignoreArgLen bool) error {
+// checkAllLatestAndCIDFile checks that --all and --latest are used correctly.
+// If cidfile is set, also check for the --cidfile flag.
+func checkAllLatestAndCIDFile(c *cobra.Command, args []string, ignoreArgLen bool, cidfile bool) error {
argLen := len(args)
if c.Flags().Lookup("all") == nil || c.Flags().Lookup("latest") == nil {
- return errors.New("unable to lookup values for 'latest' or 'all'")
+ if !cidfile {
+ return errors.New("unable to lookup values for 'latest' or 'all'")
+ } else if c.Flags().Lookup("cidfile") == nil {
+ return errors.New("unable to lookup values for 'latest', 'all' or 'cidfile'")
+ }
+ }
+
+ specifiedAll, _ := c.Flags().GetBool("all")
+ specifiedLatest, _ := c.Flags().GetBool("latest")
+ specifiedCIDFile := false
+ if cid, _ := c.Flags().GetStringArray("cidfile"); len(cid) > 0 {
+ specifiedCIDFile = true
}
- all, _ := c.Flags().GetBool("all")
- latest, _ := c.Flags().GetBool("latest")
- if all && latest {
+
+ if specifiedCIDFile && (specifiedAll || specifiedLatest) {
+ return errors.Errorf("--all, --latest and --cidfile cannot be used together")
+ } else if specifiedAll && specifiedLatest {
return errors.Errorf("--all and --latest cannot be used together")
}
+
if ignoreArgLen {
return nil
}
- if (all || latest) && argLen > 0 {
+ if (argLen > 0) && (specifiedAll || specifiedLatest) {
return errors.Errorf("no arguments are needed with --all or --latest")
+ } else if cidfile && (argLen > 0) && (specifiedAll || specifiedLatest || specifiedCIDFile) {
+ return errors.Errorf("no arguments are needed with --all, --latest or --cidfile")
}
- if argLen < 1 && !all && !latest {
+
+ if specifiedCIDFile {
+ return nil
+ }
+
+ if argLen < 1 && !specifiedAll && !specifiedLatest && !specifiedCIDFile {
return errors.Errorf("you must provide at least one name or id")
}
return nil
@@ -187,10 +208,14 @@ func getCreateFlags(c *cliconfig.PodmanCommand) {
"detach", "d", false,
"Run container in background and print container ID",
)
- createFlags.String(
+ detachKeys := createFlags.String(
"detach-keys", define.DefaultDetachKeys,
"Override the key sequence for detaching a container. Format is a single character `[a-Z]` or a comma separated sequence of `ctrl-<value>`, where `<value>` is one of: `a-z`, `@`, `^`, `[`, `\\`, `]`, `^` or `_`",
)
+ // Clear the default, the value specified in the config file should have the
+ // priority
+ *detachKeys = ""
+
createFlags.StringSlice(
"device", []string{},
"Add a host device to the container (default [])",
diff --git a/cmd/podman/containers_prune.go b/cmd/podman/containers_prune.go
index 3d0fef37d..78c50268c 100644
--- a/cmd/podman/containers_prune.go
+++ b/cmd/podman/containers_prune.go
@@ -1,6 +1,11 @@
package main
import (
+ "bufio"
+ "fmt"
+ "os"
+ "strings"
+
"github.com/containers/libpod/cmd/podman/cliconfig"
"github.com/containers/libpod/cmd/podman/shared"
"github.com/containers/libpod/libpod/define"
@@ -36,9 +41,23 @@ func init() {
pruneContainersCommand.SetUsageTemplate(UsageTemplate())
flags := pruneContainersCommand.Flags()
flags.BoolVarP(&pruneContainersCommand.Force, "force", "f", false, "Force removal of a running container. The default is false")
+ flags.StringArrayVar(&pruneContainersCommand.Filter, "filter", []string{}, "Provide filter values (e.g. 'until=<timestamp>')")
}
func pruneContainersCmd(c *cliconfig.PruneContainersValues) error {
+ if !c.Force {
+ reader := bufio.NewReader(os.Stdin)
+ fmt.Printf(`WARNING! This will remove all stopped containers.
+Are you sure you want to continue? [y/N] `)
+ ans, err := reader.ReadString('\n')
+ if err != nil {
+ return errors.Wrapf(err, "error reading input")
+ }
+ if strings.ToLower(ans)[0] != 'y' {
+ return nil
+ }
+ }
+
runtime, err := adapter.GetRuntime(getContext(), &c.PodmanCommand)
if err != nil {
return errors.Wrapf(err, "could not get runtime")
@@ -49,7 +68,7 @@ func pruneContainersCmd(c *cliconfig.PruneContainersValues) error {
if c.GlobalIsSet("max-workers") {
maxWorkers = c.GlobalFlags.MaxWorks
}
- ok, failures, err := runtime.Prune(getContext(), maxWorkers, c.Force)
+ ok, failures, err := runtime.Prune(getContext(), maxWorkers, c.Force, c.Filter)
if err != nil {
if errors.Cause(err) == define.ErrNoSuchCtr {
if len(c.InputArgs) > 1 {
diff --git a/cmd/podman/exec.go b/cmd/podman/exec.go
index afa701897..8dcec24ce 100644
--- a/cmd/podman/exec.go
+++ b/cmd/podman/exec.go
@@ -36,6 +36,9 @@ func init() {
flags := execCommand.Flags()
flags.SetInterspersed(false)
flags.StringVar(&execCommand.DetachKeys, "detach-keys", define.DefaultDetachKeys, "Select the key sequence for detaching a container. Format is a single character [a-Z] or ctrl-<value> where <value> is one of: a-z, @, ^, [, , or _")
+ // Clear the default, the value specified in the config file should have the
+ // priority
+ execCommand.DetachKeys = ""
flags.StringArrayVarP(&execCommand.Env, "env", "e", []string{}, "Set environment variables")
flags.BoolVarP(&execCommand.Interactive, "interactive", "i", false, "Keep STDIN open even if not attached")
flags.BoolVarP(&execCommand.Latest, "latest", "l", false, "Act on the latest container podman is aware of")
diff --git a/cmd/podman/images.go b/cmd/podman/images.go
index 7d498517c..6b16272f4 100644
--- a/cmd/podman/images.go
+++ b/cmd/podman/images.go
@@ -32,6 +32,7 @@ type imagesTemplateParams struct {
CreatedTime time.Time
Size string
ReadOnly bool
+ History string
}
type imagesJSONParams struct {
@@ -42,6 +43,7 @@ type imagesJSONParams struct {
Created time.Time `json:"created"`
Size *uint64 `json:"size"`
ReadOnly bool `json:"readonly"`
+ History []string `json:"history"`
}
type imagesOptions struct {
@@ -53,6 +55,7 @@ type imagesOptions struct {
outputformat string
sort string
all bool
+ history bool
}
// Type declaration and functions for sorting the images output
@@ -124,6 +127,7 @@ func imagesInit(command *cliconfig.ImagesValues) {
flags.BoolVar(&command.NoTrunc, "no-trunc", false, "Do not truncate output")
flags.BoolVarP(&command.Quiet, "quiet", "q", false, "Display only image IDs")
flags.StringVar(&command.Sort, "sort", "created", "Sort by created, id, repository, size, or tag")
+ flags.BoolVarP(&command.History, "history", "", false, "Display the image name history")
}
@@ -171,6 +175,7 @@ func imagesCmd(c *cliconfig.ImagesValues) error {
format: c.Format,
sort: c.Sort,
all: c.All,
+ history: c.History,
}
opts.outputformat = opts.setOutputFormat()
@@ -214,6 +219,9 @@ func (i imagesOptions) setOutputFormat() string {
format += "{{.Digest}}\t"
}
format += "{{.ID}}\t{{.Created}}\t{{.Size}}\t"
+ if i.history {
+ format += "{{if .History}}{{.History}}{{else}}<none>{{end}}\t"
+ }
return format
}
@@ -306,6 +314,7 @@ func getImagesTemplateOutput(ctx context.Context, images []*adapter.ContainerIma
Created: units.HumanDuration(time.Since(createdTime)) + " ago",
Size: sizeStr,
ReadOnly: img.IsReadOnly(),
+ History: strings.Join(img.NamesHistory(), ", "),
}
imagesOutput = append(imagesOutput, params)
if opts.quiet { // Show only one image ID when quiet
@@ -336,6 +345,7 @@ func getImagesJSONOutput(ctx context.Context, images []*adapter.ContainerImage)
Created: img.Created(),
Size: size,
ReadOnly: img.IsReadOnly(),
+ History: img.NamesHistory(),
}
imagesOutput = append(imagesOutput, params)
}
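The new --history column is rendered through the tab-separated Go template assembled in setOutputFormat. A self-contained sketch of how the `{{if .History}}...{{else}}<none>{{end}}` fragment behaves with text/template and tabwriter; the column widths and sample image names below are illustrative only:

    package main

    import (
        "os"
        "text/tabwriter"
        "text/template"
    )

    type row struct {
        Repository, Tag, History string
    }

    func main() {
        // Mirrors the column format assembled when --history is given
        // (simplified to three columns).
        const format = "{{.Repository}}\t{{.Tag}}\t{{if .History}}{{.History}}{{else}}<none>{{end}}\n"
        tmpl := template.Must(template.New("images").Parse(format))
        w := tabwriter.NewWriter(os.Stdout, 8, 8, 2, ' ', 0)
        tmpl.Execute(w, row{Repository: "docker.io/library/alpine", Tag: "latest", History: "localhost/old:tag"})
        tmpl.Execute(w, row{Repository: "docker.io/library/busybox", Tag: "latest"})
        w.Flush()
    }
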
diff --git a/cmd/podman/images_prune.go b/cmd/podman/images_prune.go
index 5745edd6b..2b498f83d 100644
--- a/cmd/podman/images_prune.go
+++ b/cmd/podman/images_prune.go
@@ -1,7 +1,10 @@
package main
import (
+ "bufio"
"fmt"
+ "os"
+ "strings"
"github.com/containers/libpod/cmd/podman/cliconfig"
"github.com/containers/libpod/pkg/adapter"
@@ -34,9 +37,24 @@ func init() {
pruneImagesCommand.SetUsageTemplate(UsageTemplate())
flags := pruneImagesCommand.Flags()
flags.BoolVarP(&pruneImagesCommand.All, "all", "a", false, "Remove all unused images, not just dangling ones")
+ flags.BoolVarP(&pruneImagesCommand.Force, "force", "f", false, "Do not prompt for confirmation")
+ flags.StringArrayVar(&pruneImagesCommand.Filter, "filter", []string{}, "Provide filter values (e.g. 'label=<key>=<value>')")
}
func pruneImagesCmd(c *cliconfig.PruneImagesValues) error {
+ if !c.Force {
+ reader := bufio.NewReader(os.Stdin)
+ fmt.Printf(`
+WARNING! This will remove all dangling images.
+Are you sure you want to continue? [y/N] `)
+ ans, err := reader.ReadString('\n')
+ if err != nil {
+ return errors.Wrapf(err, "error reading input")
+ }
+ if strings.ToLower(ans)[0] != 'y' {
+ return nil
+ }
+ }
runtime, err := adapter.GetRuntime(getContext(), &c.PodmanCommand)
if err != nil {
return errors.Wrapf(err, "could not get runtime")
@@ -45,7 +63,7 @@ func pruneImagesCmd(c *cliconfig.PruneImagesValues) error {
// Call prune; if any cids are returned, print them and then
// return err in case an error also came up
- pruneCids, err := runtime.PruneImages(getContext(), c.All)
+ pruneCids, err := runtime.PruneImages(getContext(), c.All, c.Filter)
if len(pruneCids) > 0 {
for _, cid := range pruneCids {
fmt.Println(cid)
diff --git a/cmd/podman/init.go b/cmd/podman/init.go
index 3f97824fc..2e0b33828 100644
--- a/cmd/podman/init.go
+++ b/cmd/podman/init.go
@@ -23,7 +23,7 @@ var (
return initCmd(&initCommand)
},
Args: func(cmd *cobra.Command, args []string) error {
- return checkAllAndLatest(cmd, args, false)
+ return checkAllLatestAndCIDFile(cmd, args, false, false)
},
Example: `podman init --latest
podman init 3c45ef19d893
diff --git a/cmd/podman/kill.go b/cmd/podman/kill.go
index d5056d86d..aba2008ca 100644
--- a/cmd/podman/kill.go
+++ b/cmd/podman/kill.go
@@ -24,7 +24,7 @@ var (
return killCmd(&killCommand)
},
Args: func(cmd *cobra.Command, args []string) error {
- return checkAllAndLatest(cmd, args, false)
+ return checkAllLatestAndCIDFile(cmd, args, false, false)
},
Example: `podman kill mywebserver
podman kill 860a4b23
diff --git a/cmd/podman/main_local.go b/cmd/podman/main_local.go
index 0484e3cf0..bc46e4652 100644
--- a/cmd/podman/main_local.go
+++ b/cmd/podman/main_local.go
@@ -69,7 +69,7 @@ func init() {
rootCmd.PersistentFlags().StringArrayVar(&MainGlobalOpts.StorageOpts, "storage-opt", []string{}, "Used to pass an option to the storage driver")
rootCmd.PersistentFlags().BoolVar(&MainGlobalOpts.Syslog, "syslog", false, "Output logging information to syslog as well as the console")
- rootCmd.PersistentFlags().StringVar(&MainGlobalOpts.TmpDir, "tmpdir", "", "Path to the tmp directory")
+ rootCmd.PersistentFlags().StringVar(&MainGlobalOpts.TmpDir, "tmpdir", "", "Path to the tmp directory for libpod state content.\n\nNote: use the environment variable 'TMPDIR' to change the temporary storage location for container images, '/var/tmp'.\n")
rootCmd.PersistentFlags().BoolVar(&MainGlobalOpts.Trace, "trace", false, "Enable opentracing output")
}
@@ -159,7 +159,7 @@ func setupRootless(cmd *cobra.Command, args []string) error {
Remote: remoteclient,
}
- runtime, err := libpodruntime.GetRuntime(getContext(), &podmanCmd)
+ runtime, err := libpodruntime.GetRuntimeNoStore(getContext(), &podmanCmd)
if err != nil {
return errors.Wrapf(err, "could not get runtime")
}
diff --git a/cmd/podman/mount.go b/cmd/podman/mount.go
index b14827592..526a236fd 100644
--- a/cmd/podman/mount.go
+++ b/cmd/podman/mount.go
@@ -35,7 +35,7 @@ var (
return mountCmd(&mountCommand)
},
Args: func(cmd *cobra.Command, args []string) error {
- return checkAllAndLatest(cmd, args, true)
+ return checkAllLatestAndCIDFile(cmd, args, true, false)
},
}
)
diff --git a/cmd/podman/pod_kill.go b/cmd/podman/pod_kill.go
index 9bda77471..064946f72 100644
--- a/cmd/podman/pod_kill.go
+++ b/cmd/podman/pod_kill.go
@@ -28,7 +28,7 @@ var (
return podKillCmd(&podKillCommand)
},
Args: func(cmd *cobra.Command, args []string) error {
- return checkAllAndLatest(cmd, args, false)
+ return checkAllLatestAndCIDFile(cmd, args, false, false)
},
Example: `podman pod kill podID
podman pod kill --signal TERM mywebserver
diff --git a/cmd/podman/pod_pause.go b/cmd/podman/pod_pause.go
index 45e1319ff..320072919 100644
--- a/cmd/podman/pod_pause.go
+++ b/cmd/podman/pod_pause.go
@@ -25,7 +25,7 @@ var (
return podPauseCmd(&podPauseCommand)
},
Args: func(cmd *cobra.Command, args []string) error {
- return checkAllAndLatest(cmd, args, false)
+ return checkAllLatestAndCIDFile(cmd, args, false, false)
},
Example: `podman pod pause podID1 podID2
podman pod pause --latest
diff --git a/cmd/podman/pod_restart.go b/cmd/podman/pod_restart.go
index cc090bd6e..cb9f3770f 100644
--- a/cmd/podman/pod_restart.go
+++ b/cmd/podman/pod_restart.go
@@ -26,7 +26,7 @@ var (
return podRestartCmd(&podRestartCommand)
},
Args: func(cmd *cobra.Command, args []string) error {
- return checkAllAndLatest(cmd, args, false)
+ return checkAllLatestAndCIDFile(cmd, args, false, false)
},
Example: `podman pod restart podID1 podID2
podman pod restart --latest
diff --git a/cmd/podman/pod_rm.go b/cmd/podman/pod_rm.go
index 82d0eb977..02daf8764 100644
--- a/cmd/podman/pod_rm.go
+++ b/cmd/podman/pod_rm.go
@@ -12,7 +12,7 @@ import (
var (
podRmCommand cliconfig.PodRmValues
- podRmDescription = fmt.Sprintf(`podman rm will remove one or more pods from the host.
+ podRmDescription = fmt.Sprintf(`podman rm will remove one or more stopped pods and their containers from the host.
The pod name or ID can be used. A pod with containers will not be removed without --force. If --force is specified, all containers will be stopped, then removed.`)
_podRmCommand = &cobra.Command{
@@ -26,7 +26,7 @@ var (
return podRmCmd(&podRmCommand)
},
Args: func(cmd *cobra.Command, args []string) error {
- return checkAllAndLatest(cmd, args, false)
+ return checkAllLatestAndCIDFile(cmd, args, false, false)
},
Example: `podman pod rm mywebserverpod
podman pod rm -f 860a4b23
@@ -41,7 +41,9 @@ func init() {
flags := podRmCommand.Flags()
flags.BoolVarP(&podRmCommand.All, "all", "a", false, "Remove all running pods")
flags.BoolVarP(&podRmCommand.Force, "force", "f", false, "Force removal of a running pod by first stopping all containers, then removing all containers in the pod. The default is false")
+ flags.BoolVarP(&podRmCommand.Ignore, "ignore", "i", false, "Ignore errors when a specified pod is missing")
flags.BoolVarP(&podRmCommand.Latest, "latest", "l", false, "Remove the latest pod podman is aware of")
+ markFlagHiddenForRemoteClient("ignore", flags)
markFlagHiddenForRemoteClient("latest", flags)
}
diff --git a/cmd/podman/pod_start.go b/cmd/podman/pod_start.go
index 64c951b43..aa2e09e98 100644
--- a/cmd/podman/pod_start.go
+++ b/cmd/podman/pod_start.go
@@ -26,7 +26,7 @@ var (
return podStartCmd(&podStartCommand)
},
Args: func(cmd *cobra.Command, args []string) error {
- return checkAllAndLatest(cmd, args, false)
+ return checkAllLatestAndCIDFile(cmd, args, false, false)
},
Example: `podman pod start podID
podman pod start --latest
diff --git a/cmd/podman/pod_stop.go b/cmd/podman/pod_stop.go
index edda99550..7d3951ec4 100644
--- a/cmd/podman/pod_stop.go
+++ b/cmd/podman/pod_stop.go
@@ -27,7 +27,7 @@ var (
return podStopCmd(&podStopCommand)
},
Args: func(cmd *cobra.Command, args []string) error {
- return checkAllAndLatest(cmd, args, false)
+ return checkAllLatestAndCIDFile(cmd, args, false, false)
},
Example: `podman pod stop mywebserverpod
podman pod stop --latest
@@ -41,8 +41,10 @@ func init() {
podStopCommand.SetUsageTemplate(UsageTemplate())
flags := podStopCommand.Flags()
flags.BoolVarP(&podStopCommand.All, "all", "a", false, "Stop all running pods")
+ flags.BoolVarP(&podStopCommand.Ignore, "ignore", "i", false, "Ignore errors when a specified pod is missing")
flags.BoolVarP(&podStopCommand.Latest, "latest", "l", false, "Stop the latest pod podman is aware of")
flags.UintVarP(&podStopCommand.Timeout, "timeout", "t", 0, "Seconds to wait for pod stop before killing the container")
+ markFlagHiddenForRemoteClient("ignore", flags)
markFlagHiddenForRemoteClient("latest", flags)
}
diff --git a/cmd/podman/pod_unpause.go b/cmd/podman/pod_unpause.go
index 833434c3f..1f80a7c79 100644
--- a/cmd/podman/pod_unpause.go
+++ b/cmd/podman/pod_unpause.go
@@ -26,7 +26,7 @@ var (
return podUnpauseCmd(&podUnpauseCommand)
},
Args: func(cmd *cobra.Command, args []string) error {
- return checkAllAndLatest(cmd, args, false)
+ return checkAllLatestAndCIDFile(cmd, args, false, false)
},
Example: `podman pod unpause podID1 podID2
podman pod unpause --all
diff --git a/cmd/podman/pods_prune.go b/cmd/podman/pods_prune.go
index d40e37bdb..1c5c0bb58 100644
--- a/cmd/podman/pods_prune.go
+++ b/cmd/podman/pods_prune.go
@@ -17,7 +17,7 @@ var (
_prunePodsCommand = &cobra.Command{
Use: "prune",
Args: noSubArgs,
- Short: "Remove all stopped pods",
+ Short: "Remove all stopped pods and their containers",
Long: podPruneDescription,
RunE: func(cmd *cobra.Command, args []string) error {
podPruneCommand.InputArgs = args
@@ -32,7 +32,7 @@ func init() {
podPruneCommand.SetHelpTemplate(HelpTemplate())
podPruneCommand.SetUsageTemplate(UsageTemplate())
flags := podPruneCommand.Flags()
- flags.BoolVarP(&podPruneCommand.Force, "force", "f", false, "Force removal of a running pods. The default is false")
+ flags.BoolVarP(&podPruneCommand.Force, "force", "f", false, "Force removal of all running pods. The default is false")
}
func podPruneCmd(c *cliconfig.PodPruneValues) error {
diff --git a/cmd/podman/port.go b/cmd/podman/port.go
index 4e1f9642c..eef3d4b1d 100644
--- a/cmd/podman/port.go
+++ b/cmd/podman/port.go
@@ -26,7 +26,7 @@ var (
return portCmd(&portCommand)
},
Args: func(cmd *cobra.Command, args []string) error {
- return checkAllAndLatest(cmd, args, true)
+ return checkAllLatestAndCIDFile(cmd, args, true, false)
},
Example: `podman port --all
podman port ctrID 80/tcp
diff --git a/cmd/podman/reset.go b/cmd/podman/reset.go
new file mode 100644
index 000000000..9d16dc978
--- /dev/null
+++ b/cmd/podman/reset.go
@@ -0,0 +1,71 @@
+package main
+
+import (
+ "bufio"
+ "fmt"
+ "os"
+ "strings"
+
+ "github.com/containers/libpod/cmd/podman/cliconfig"
+ "github.com/containers/libpod/pkg/adapter"
+ "github.com/pkg/errors"
+ "github.com/spf13/cobra"
+)
+
+var (
+ systemResetCommand cliconfig.SystemResetValues
+ systemResetDescription = `Reset podman storage back to default state

+
+ All containers will be stopped and removed, and all images, volumes and container content will be removed.
+`
+ _systemResetCommand = &cobra.Command{
+ Use: "reset",
+ Args: noSubArgs,
+ Short: "Reset podman storage",
+ Long: systemResetDescription,
+ RunE: func(cmd *cobra.Command, args []string) error {
+ systemResetCommand.InputArgs = args
+ systemResetCommand.GlobalFlags = MainGlobalOpts
+ systemResetCommand.Remote = remoteclient
+ return systemResetCmd(&systemResetCommand)
+ },
+ }
+)
+
+func init() {
+ systemResetCommand.Command = _systemResetCommand
+ flags := systemResetCommand.Flags()
+ flags.BoolVarP(&systemResetCommand.Force, "force", "f", false, "Do not prompt for confirmation")
+
+ systemResetCommand.SetHelpTemplate(HelpTemplate())
+ systemResetCommand.SetUsageTemplate(UsageTemplate())
+}
+
+func systemResetCmd(c *cliconfig.SystemResetValues) error {
+ // Prompt for confirmation if --force is not set
+ if !c.Force {
+ reader := bufio.NewReader(os.Stdin)
+ fmt.Print(`
+WARNING! This will remove:
+ - all containers
+ - all pods
+ - all images
+ - all build cache
+Are you sure you want to continue? [y/N] `)
+ ans, err := reader.ReadString('\n')
+ if err != nil {
+ return errors.Wrapf(err, "error reading input")
+ }
+ if strings.ToLower(ans)[0] != 'y' {
+ return nil
+ }
+ }
+
+ runtime, err := adapter.GetRuntime(getContext(), &c.PodmanCommand)
+ if err != nil {
+ return errors.Wrapf(err, "error creating libpod runtime")
+ }
+ // No shutdown, since storage will be destroyed when command completes
+
+ return runtime.Reset()
+}
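This confirmation prompt now appears in three commands (container prune, image prune, and system reset). A factored-out helper would be one obvious follow-up; the sketch below is illustrative only, not part of this change, and trims the answer before comparing it so an empty input line cannot be indexed:

    package main

    import (
        "bufio"
        "fmt"
        "os"
        "strings"

        "github.com/pkg/errors"
    )

    // confirm prints the warning, reads one line from stdin, and reports
    // whether the user answered "y"/"yes" (case-insensitive).
    func confirm(warning string) (bool, error) {
        fmt.Print(warning, "\nAre you sure you want to continue? [y/N] ")
        ans, err := bufio.NewReader(os.Stdin).ReadString('\n')
        if err != nil {
            return false, errors.Wrapf(err, "error reading input")
        }
        ans = strings.ToLower(strings.TrimSpace(ans))
        return ans == "y" || ans == "yes", nil
    }

    func main() {
        ok, err := confirm("WARNING! This will remove all stopped containers.")
        if err != nil {
            fmt.Fprintln(os.Stderr, err)
            os.Exit(1)
        }
        fmt.Println("proceed:", ok)
    }
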
diff --git a/cmd/podman/restart.go b/cmd/podman/restart.go
index c97fb0dc1..996a9f7ce 100644
--- a/cmd/podman/restart.go
+++ b/cmd/podman/restart.go
@@ -23,7 +23,7 @@ var (
return restartCmd(&restartCommand)
},
Args: func(cmd *cobra.Command, args []string) error {
- return checkAllAndLatest(cmd, args, false)
+ return checkAllLatestAndCIDFile(cmd, args, false, false)
},
Example: `podman restart ctrID
podman restart --latest
diff --git a/cmd/podman/restore.go b/cmd/podman/restore.go
index caefadb6d..a8db7fa94 100644
--- a/cmd/podman/restore.go
+++ b/cmd/podman/restore.go
@@ -26,7 +26,7 @@ var (
return restoreCmd(&restoreCommand, cmd)
},
Args: func(cmd *cobra.Command, args []string) error {
- return checkAllAndLatest(cmd, args, true)
+ return checkAllLatestAndCIDFile(cmd, args, true, false)
},
Example: `podman container restore ctrID
podman container restore --latest
diff --git a/cmd/podman/rm.go b/cmd/podman/rm.go
index 6329a9d8e..e69565e95 100644
--- a/cmd/podman/rm.go
+++ b/cmd/podman/rm.go
@@ -25,7 +25,7 @@ var (
return rmCmd(&rmCommand)
},
Args: func(cmd *cobra.Command, args []string) error {
- return checkAllAndLatest(cmd, args, false)
+ return checkAllLatestAndCIDFile(cmd, args, false, true)
},
Example: `podman rm imageID
podman rm mywebserver myflaskserver 860a4b23
@@ -40,12 +40,16 @@ func init() {
rmCommand.SetUsageTemplate(UsageTemplate())
flags := rmCommand.Flags()
flags.BoolVarP(&rmCommand.All, "all", "a", false, "Remove all containers")
+ flags.BoolVarP(&rmCommand.Ignore, "ignore", "i", false, "Ignore errors when a specified container is missing")
flags.BoolVarP(&rmCommand.Force, "force", "f", false, "Force removal of a running or unusable container. The default is false")
flags.BoolVarP(&rmCommand.Latest, "latest", "l", false, "Act on the latest container podman is aware of")
flags.BoolVar(&rmCommand.Storage, "storage", false, "Remove container from storage library")
flags.BoolVarP(&rmCommand.Volumes, "volumes", "v", false, "Remove anonymous volumes associated with the container")
- markFlagHiddenForRemoteClient("storage", flags)
+ flags.StringArrayVarP(&rmCommand.CIDFiles, "cidfile", "", nil, "Read the container ID from the file")
+ markFlagHiddenForRemoteClient("ignore", flags)
+ markFlagHiddenForRemoteClient("cidfile", flags)
markFlagHiddenForRemoteClient("latest", flags)
+ markFlagHiddenForRemoteClient("storage", flags)
}
// rmCmd removes one or more containers
@@ -56,10 +60,10 @@ func rmCmd(c *cliconfig.RmValues) error {
}
defer runtime.DeferredShutdown(false)
- // Storage conflicts with --all/--latest/--volumes
+ // Storage conflicts with --all/--latest/--volumes/--cidfile/--ignore
if c.Storage {
- if c.All || c.Latest || c.Volumes {
- return errors.Errorf("--storage conflicts with --volumes, --all, and --latest")
+ if c.All || c.Ignore || c.Latest || c.Volumes || c.CIDFiles != nil {
+ return errors.Errorf("--storage conflicts with --volumes, --all, --latest, --ignore and --cidfile")
}
}
diff --git a/cmd/podman/shared/container.go b/cmd/podman/shared/container.go
index f49943477..4f2002992 100644
--- a/cmd/podman/shared/container.go
+++ b/cmd/podman/shared/container.go
@@ -17,6 +17,7 @@ import (
"github.com/containers/libpod/libpod"
"github.com/containers/libpod/libpod/define"
"github.com/containers/libpod/libpod/image"
+ "github.com/containers/libpod/pkg/timetype"
"github.com/containers/libpod/pkg/util"
"github.com/cri-o/ocicni/pkg/ocicni"
"github.com/docker/go-units"
@@ -195,6 +196,8 @@ func NewBatchContainer(ctr *libpod.Container, opts PsOptions) (PsContainerOutput
status = "Paused"
case define.ContainerStateCreated.String(), define.ContainerStateConfigured.String():
status = "Created"
+ case define.ContainerStateRemoving.String():
+ status = "Removing"
default:
status = "Error"
}
@@ -272,7 +275,8 @@ func worker(wg *sync.WaitGroup, jobs <-chan workerInput, results chan<- PsContai
}
}
-func generateContainerFilterFuncs(filter, filterValue string, r *libpod.Runtime) (func(container *libpod.Container) bool, error) {
+// GenerateContainerFilterFuncs return ContainerFilter functions based of filter.
+func GenerateContainerFilterFuncs(filter, filterValue string, r *libpod.Runtime) (func(container *libpod.Container) bool, error) {
switch filter {
case "id":
return func(c *libpod.Container) bool {
@@ -394,6 +398,22 @@ func generateContainerFilterFuncs(filter, filterValue string, r *libpod.Runtime)
}
return hcStatus == filterValue
}, nil
+ case "until":
+ ts, err := timetype.GetTimestamp(filterValue, time.Now())
+ if err != nil {
+ return nil, err
+ }
+ seconds, nanoseconds, err := timetype.ParseTimestamps(ts, 0)
+ if err != nil {
+ return nil, err
+ }
+ until := time.Unix(seconds, nanoseconds)
+ return func(c *libpod.Container) bool {
+ if !until.IsZero() && c.CreatedTime().After((until)) {
+ return true
+ }
+ return false
+ }, nil
}
return nil, errors.Errorf("%s is an invalid filter", filter)
}
@@ -411,7 +431,7 @@ func GetPsContainerOutput(r *libpod.Runtime, opts PsOptions, filters []string, m
if len(filterSplit) < 2 {
return nil, errors.Errorf("filter input must be in the form of filter=value: %s is invalid", f)
}
- generatedFunc, err := generateContainerFilterFuncs(filterSplit[0], filterSplit[1], r)
+ generatedFunc, err := GenerateContainerFilterFuncs(filterSplit[0], filterSplit[1], r)
if err != nil {
return nil, errors.Wrapf(err, "invalid filter")
}
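The `until` filter leans on the new pkg/timetype package (per the diffstat, a port of Docker-style timestamp helpers). As a rough standard-library-only sketch of the predicate built above, handling just RFC3339 timestamps and Go durations rather than everything GetTimestamp accepts:

    package main

    import (
        "fmt"
        "time"
    )

    // untilFilter returns a predicate that, like the hunk above, reports
    // whether a container was created after the resolved point in time.
    func untilFilter(value string, now time.Time) (func(created time.Time) bool, error) {
        until, err := time.Parse(time.RFC3339, value)
        if err != nil {
            d, derr := time.ParseDuration(value)
            if derr != nil {
                return nil, fmt.Errorf("invalid until value %q", value)
            }
            until = now.Add(-d)
        }
        return func(created time.Time) bool { return created.After(until) }, nil
    }

    func main() {
        f, _ := untilFilter("1h", time.Now())
        fmt.Println(f(time.Now().Add(-30 * time.Minute))) // true: after now-1h
        fmt.Println(f(time.Now().Add(-2 * time.Hour)))    // false
    }
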
diff --git a/cmd/podman/shared/create.go b/cmd/podman/shared/create.go
index 2fcec6deb..bb4e9cd12 100644
--- a/cmd/podman/shared/create.go
+++ b/cmd/podman/shared/create.go
@@ -7,6 +7,7 @@ import (
"io"
"os"
"path/filepath"
+ goruntime "runtime"
"strconv"
"strings"
"syscall"
@@ -88,9 +89,11 @@ func CreateContainer(ctx context.Context, c *GenericCLIResults, runtime *libpod.
return nil, nil, err
}
+ overrideOS := c.String("override-os")
+ overrideArch := c.String("override-arch")
dockerRegistryOptions := image.DockerRegistryOptions{
- OSChoice: c.String("override-os"),
- ArchitectureChoice: c.String("override-arch"),
+ OSChoice: overrideOS,
+ ArchitectureChoice: overrideArch,
}
newImage, err := runtime.ImageRuntime().New(ctx, name, rtc.SignaturePolicyPath, c.String("authfile"), writer, &dockerRegistryOptions, image.SigningOptions{}, nil, pullType)
@@ -101,6 +104,15 @@ func CreateContainer(ctx context.Context, c *GenericCLIResults, runtime *libpod.
if err != nil {
return nil, nil, err
}
+
+ if overrideOS == "" && data.Os != goruntime.GOOS {
+ return nil, nil, errors.Errorf("incompatible image OS %q on %q host", data.Os, goruntime.GOOS)
+ }
+
+ if overrideArch == "" && data.Architecture != goruntime.GOARCH {
+ return nil, nil, errors.Errorf("incompatible image architecture %q on %q host", data.Architecture, goruntime.GOARCH)
+ }
+
names := newImage.Names()
if len(names) > 0 {
imageName = names[0]
@@ -214,24 +226,24 @@ func configureEntrypoint(c *GenericCLIResults, data *inspect.ImageData) []string
return entrypoint
}
-func configurePod(c *GenericCLIResults, runtime *libpod.Runtime, namespaces map[string]string, podName string) (map[string]string, error) {
+func configurePod(c *GenericCLIResults, runtime *libpod.Runtime, namespaces map[string]string, podName string) (map[string]string, string, error) {
pod, err := runtime.LookupPod(podName)
if err != nil {
- return namespaces, err
+ return namespaces, "", err
}
podInfraID, err := pod.InfraContainerID()
if err != nil {
- return namespaces, err
+ return namespaces, "", err
}
hasUserns := false
if podInfraID != "" {
podCtr, err := runtime.GetContainer(podInfraID)
if err != nil {
- return namespaces, err
+ return namespaces, "", err
}
mappings, err := podCtr.IDMappings()
if err != nil {
- return namespaces, err
+ return namespaces, "", err
}
hasUserns = len(mappings.UIDMap) > 0
}
@@ -251,7 +263,7 @@ func configurePod(c *GenericCLIResults, runtime *libpod.Runtime, namespaces map[
if (namespaces["uts"] == cc.Pod) || (!c.IsSet("uts") && pod.SharesUTS()) {
namespaces["uts"] = fmt.Sprintf("container:%s", podInfraID)
}
- return namespaces, nil
+ return namespaces, podInfraID, nil
}
// Parses CLI options related to container creation into a config which can be
@@ -359,6 +371,10 @@ func ParseCreateOpts(ctx context.Context, c *GenericCLIResults, runtime *libpod.
if len(podName) < 1 && c.IsSet("pod") {
return nil, errors.Errorf("new pod name must be at least one character")
}
+
+ // If we are adding a container to a pod, we would like to add an annotation for the infra ID
+ // so kata containers can share VMs inside the pod
+ var podInfraID string
if c.IsSet("pod") {
if strings.HasPrefix(originalPodName, "new:") {
// pod does not exist; lets make it
@@ -387,7 +403,7 @@ func ParseCreateOpts(ctx context.Context, c *GenericCLIResults, runtime *libpod.
// The container now cannot have port bindings; so we reset the map
portBindings = make(map[nat.Port][]nat.PortBinding)
}
- namespaces, err = configurePod(c, runtime, namespaces, podName)
+ namespaces, podInfraID, err = configurePod(c, runtime, namespaces, podName)
if err != nil {
return nil, err
}
@@ -485,12 +501,26 @@ func ParseCreateOpts(ctx context.Context, c *GenericCLIResults, runtime *libpod.
// ANNOTATIONS
annotations := make(map[string]string)
+
// First, add our default annotations
- annotations[ann.ContainerType] = "sandbox"
annotations[ann.TTY] = "false"
if tty {
annotations[ann.TTY] = "true"
}
+
+ // in the event this container is in a pod, and the pod has an infra container
+ // we will want to configure it as a type "container" instead of defaulting to
+ // the behavior of a "sandbox" container
+ // In Kata containers:
+ // - "sandbox" is the annotation that denotes the container should use its own
+ // VM, which is the default behavior
+ // - "container" denotes the container should join the VM of the SandboxID
+ // (the infra container)
+ if podInfraID != "" {
+ annotations[ann.SandboxID] = podInfraID
+ annotations[ann.ContainerType] = ann.ContainerTypeContainer
+ }
+
if data != nil {
// Next, add annotations from the image
for key, value := range data.Annotations {
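The annotation wiring above can be checked from the CLI. A minimal sketch, assuming the annotation constants resolve to the CRI-O key names (`io.kubernetes.cri-o.ContainerType` and `io.kubernetes.cri-o.SandboxID` are an assumption here, as are the pod and container names):

```
# Create a pod, then add a regular container to it.
podman pod create --name demo
podman create --pod demo --name worker alpine sleep 100

# The container should now carry the "container" type annotation and the
# infra container ID as its sandbox ID, so a Kata runtime joins the pod's VM.
podman inspect worker --format \
    '{{ index .Config.Annotations "io.kubernetes.cri-o.ContainerType" }}'
```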
diff --git a/cmd/podman/start.go b/cmd/podman/start.go
index d4b4534bb..a070cd18d 100644
--- a/cmd/podman/start.go
+++ b/cmd/podman/start.go
@@ -35,6 +35,9 @@ func init() {
startCommand.SetUsageTemplate(UsageTemplate())
flags := startCommand.Flags()
flags.BoolVarP(&startCommand.Attach, "attach", "a", false, "Attach container's STDOUT and STDERR")
+ // Clear the default; the value specified in the config file should take
+ // priority
+ startCommand.DetachKeys = ""
flags.StringVar(&startCommand.DetachKeys, "detach-keys", define.DefaultDetachKeys, "Select the key sequence for detaching a container. Format is a single character `[a-Z]` or a comma separated sequence of `ctrl-<value>`, where `<value>` is one of: `a-z`, `@`, `^`, `[`, `\\`, `]`, `^` or `_`")
flags.BoolVarP(&startCommand.Interactive, "interactive", "i", false, "Keep STDIN open even if not attached")
flags.BoolVarP(&startCommand.Latest, "latest", "l", false, "Act on the latest container podman is aware of")
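Per the comment above, the intended precedence is: explicit flag, then config file, then built-in default. A sketch of what that means in practice (the `detach_keys` key in `libpod.conf` is an assumption about the config option name):

```
# An explicit flag always wins:
podman start --attach --detach-keys ctrl-e mycontainer

# Otherwise a value from the config file (e.g. detach_keys = "ctrl-p,ctrl-q"
# in libpod.conf) applies, with the built-in default used only as a last resort.
podman start --attach mycontainer
```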
diff --git a/cmd/podman/stop.go b/cmd/podman/stop.go
index e04d8a12b..c62da80df 100644
--- a/cmd/podman/stop.go
+++ b/cmd/podman/stop.go
@@ -25,7 +25,7 @@ var (
return stopCmd(&stopCommand)
},
Args: func(cmd *cobra.Command, args []string) error {
- return checkAllAndLatest(cmd, args, false)
+ return checkAllLatestAndCIDFile(cmd, args, false, true)
},
Example: `podman stop ctrID
podman stop --latest
@@ -39,10 +39,14 @@ func init() {
stopCommand.SetUsageTemplate(UsageTemplate())
flags := stopCommand.Flags()
flags.BoolVarP(&stopCommand.All, "all", "a", false, "Stop all running containers")
+ flags.BoolVarP(&stopCommand.Ignore, "ignore", "i", false, "Ignore errors when a specified container is missing")
+ flags.StringArrayVarP(&stopCommand.CIDFiles, "cidfile", "", nil, "Read the container ID from the file")
flags.BoolVarP(&stopCommand.Latest, "latest", "l", false, "Act on the latest container podman is aware of")
flags.UintVar(&stopCommand.Timeout, "time", define.CtrRemoveTimeout, "Seconds to wait for stop before killing the container")
flags.UintVarP(&stopCommand.Timeout, "timeout", "t", define.CtrRemoveTimeout, "Seconds to wait for stop before killing the container")
markFlagHiddenForRemoteClient("latest", flags)
+ markFlagHiddenForRemoteClient("cidfile", flags)
+ markFlagHiddenForRemoteClient("ignore", flags)
}
// stopCmd stops a container or containers
diff --git a/cmd/podman/system.go b/cmd/podman/system.go
index 80080bf44..921d0c037 100644
--- a/cmd/podman/system.go
+++ b/cmd/podman/system.go
@@ -19,6 +19,7 @@ var (
)
var systemCommands = []*cobra.Command{
+ _systemResetCommand,
_infoCommand,
_pruneSystemCommand,
}
diff --git a/cmd/podman/system_prune.go b/cmd/podman/system_prune.go
index b499d8dd2..74fdcde99 100644
--- a/cmd/podman/system_prune.go
+++ b/cmd/podman/system_prune.go
@@ -82,7 +82,6 @@ Are you sure you want to continue? [y/N] `, volumeString)
fmt.Println("Deleted Pods")
pruneValues := cliconfig.PodPruneValues{
PodmanCommand: c.PodmanCommand,
- Force: c.Force,
}
ctx := getContext()
ok, failures, lasterr := runtime.PrunePods(ctx, &pruneValues)
@@ -93,7 +92,7 @@ Are you sure you want to continue? [y/N] `, volumeString)
rmWorkers := shared.Parallelize("rm")
fmt.Println("Deleted Containers")
- ok, failures, err = runtime.Prune(ctx, rmWorkers, false)
+ ok, failures, err = runtime.Prune(ctx, rmWorkers, false, []string{})
if err != nil {
if lasterr != nil {
logrus.Errorf("%q", err)
@@ -117,7 +116,8 @@ Are you sure you want to continue? [y/N] `, volumeString)
// Call prune; if any cids are returned, print them and then
// return err in case an error also came up
- pruneCids, err := runtime.PruneImages(ctx, c.All)
+ // TODO: support for filters in system prune
+ pruneCids, err := runtime.PruneImages(ctx, c.All, []string{})
if len(pruneCids) > 0 {
fmt.Println("Deleted Images")
for _, cid := range pruneCids {
diff --git a/cmd/podman/system_renumber.go b/cmd/podman/system_renumber.go
index 81752a177..4e90a2d8c 100644
--- a/cmd/podman/system_renumber.go
+++ b/cmd/podman/system_renumber.go
@@ -44,9 +44,7 @@ func renumberCmd(c *cliconfig.SystemRenumberValues) error {
if err != nil {
return errors.Wrapf(err, "error renumbering locks")
}
- if err := r.Shutdown(false); err != nil {
- return err
- }
+ _ = r.Shutdown(false)
return nil
}
diff --git a/cmd/podman/umount.go b/cmd/podman/umount.go
index c3d81d3a8..6ad485c2c 100644
--- a/cmd/podman/umount.go
+++ b/cmd/podman/umount.go
@@ -28,7 +28,7 @@ var (
return umountCmd(&umountCommand)
},
Args: func(cmd *cobra.Command, args []string) error {
- return checkAllAndLatest(cmd, args, false)
+ return checkAllLatestAndCIDFile(cmd, args, false, false)
},
Example: `podman umount ctrID
podman umount ctrID1 ctrID2 ctrID3
diff --git a/cmd/podman/varlink/io.podman.varlink b/cmd/podman/varlink/io.podman.varlink
index f9339fccb..a3fd27ed6 100644
--- a/cmd/podman/varlink/io.podman.varlink
+++ b/cmd/podman/varlink/io.podman.varlink
@@ -70,7 +70,8 @@ type Image (
labels: [string]string,
isParent: bool,
topLayer: string,
- readOnly: bool
+ readOnly: bool,
+ history: []string
)
# ImageHistory describes the returned structure from ImageHistory.
@@ -535,6 +536,10 @@ method GetVersion() -> (
remote_api_version: int
)
+# Reset resets Podman back to its initial state.
+# Removes all Pods, Containers, Images, and Volumes.
+method Reset() -> ()
+
# GetInfo returns a [PodmanInfo](#PodmanInfo) struct that describes podman and its host such as storage stats,
# build information of Podman, and system-wide registries.
method GetInfo() -> (info: PodmanInfo)
@@ -1217,7 +1222,7 @@ method UnmountContainer(name: string, force: bool) -> ()
# ImagesPrune removes all unused images from the local store. Upon successful pruning,
# the IDs of the removed images are returned.
-method ImagesPrune(all: bool) -> (pruned: []string)
+method ImagesPrune(all: bool, filter: []string) -> (pruned: []string)
# This function is not implemented yet.
# method ListContainerPorts(name: string) -> (notimplemented: NotImplemented)
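Both varlink additions can be exercised with the `varlink` CLI against a running `podman varlink` service. A sketch, assuming the service listens on the usual `unix:/run/podman/io.podman` address:

```
# Reset the whole Podman state (destructive!).
varlink call unix:/run/podman/io.podman/io.podman.Reset '{}'

# Prune all unused images; the filter list may be left empty.
varlink call unix:/run/podman/io.podman/io.podman.ImagesPrune '{"all": true, "filter": []}'
```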
diff --git a/cmd/podman/version.go b/cmd/podman/version.go
index 314b2e266..5907241ff 100644
--- a/cmd/podman/version.go
+++ b/cmd/podman/version.go
@@ -37,13 +37,40 @@ func init() {
flags := versionCommand.Flags()
flags.StringVarP(&versionCommand.Format, "format", "f", "", "Change the output format to JSON or a Go template")
}
+func getRemoteVersion(c *cliconfig.VersionValues) (version define.Version, err error) {
+ runtime, err := adapter.GetRuntime(getContext(), &c.PodmanCommand)
+ if err != nil {
+ return version, errors.Wrapf(err, "could not get runtime")
+ }
+ defer runtime.DeferredShutdown(false)
+
+ return runtime.GetVersion()
+}
+
+type versionStruct struct {
+ Client define.Version
+ Server define.Version
+}
// versionCmd gets and prints version info for version command
func versionCmd(c *cliconfig.VersionValues) error {
- clientVersion, err := define.GetVersion()
+
+ var (
+ v versionStruct
+ err error
+ )
+ v.Client, err = define.GetVersion()
if err != nil {
return errors.Wrapf(err, "unable to determine version")
}
+ if remote {
+ v.Server, err = getRemoteVersion(c)
+ if err != nil {
+ return err
+ }
+ } else {
+ v.Server = v.Client
+ }
versionOutputFormat := c.Format
if versionOutputFormat != "" {
@@ -53,11 +80,20 @@ func versionCmd(c *cliconfig.VersionValues) error {
var out formats.Writer
switch versionOutputFormat {
case formats.JSONString:
- out = formats.JSONStruct{Output: clientVersion}
+ out = formats.JSONStruct{Output: v}
+ return out.Out()
default:
- out = formats.StdoutTemplate{Output: clientVersion, Template: versionOutputFormat}
+ out = formats.StdoutTemplate{Output: v, Template: versionOutputFormat}
+ err := out.Out()
+ if err != nil {
+ // On failure, assume the user passed a template written for the older output layout and retry against the client version only
+ out = formats.StdoutTemplate{Output: v.Client, Template: versionOutputFormat}
+ if err1 := out.Out(); err1 != nil {
+ return err
+ }
+ }
}
- return out.Out()
+ return nil
}
w := tabwriter.NewWriter(os.Stdout, 0, 0, 2, ' ', 0)
defer w.Flush()
@@ -66,25 +102,13 @@ func versionCmd(c *cliconfig.VersionValues) error {
if _, err := fmt.Fprintf(w, "Client:\n"); err != nil {
return err
}
- }
- formatVersion(w, clientVersion)
-
- if remote {
- if _, err := fmt.Fprintf(w, "\nService:\n"); err != nil {
- return err
- }
-
- runtime, err := adapter.GetRuntime(getContext(), &c.PodmanCommand)
- if err != nil {
- return errors.Wrapf(err, "could not get runtime")
- }
- defer runtime.DeferredShutdown(false)
-
- serviceVersion, err := runtime.GetVersion()
- if err != nil {
+ formatVersion(w, v.Client)
+ if _, err := fmt.Fprintf(w, "\nServer:\n"); err != nil {
return err
}
- formatVersion(w, serviceVersion)
+ formatVersion(w, v.Server)
+ } else {
+ formatVersion(w, v.Client)
}
return nil
}
diff --git a/commands.md b/commands.md
index c035b011f..de9169a4b 100644
--- a/commands.md
+++ b/commands.md
@@ -6,89 +6,95 @@
| Command | Description | Demo | Script |
| :----------------------------------------------------------------------- | :------------------------------------------------------------------------- | :-------------------------------------------------------------------------- | :---------------------------------------------------------------------------------- |
-| [podman(1)](/docs/podman.1.md) | Simple management tool for pods and images |
-| [podman-attach(1)](/docs/podman-attach.1.md) | Attach to a running container |
-| [podman-build(1)](/docs/podman-build.1.md) | Build an image using instructions from Dockerfiles |
-| [podman-commit(1)](/docs/podman-commit.1.md) | Create new image based on the changed container |
-| [podman-container(1)](/docs/podman-container.1.md) | Manage Containers |
-| [podman-container-checkpoint(1)](/docs/podman-container-checkpoint.1.md) | Checkpoints one or more running containers |
-| [podman-container-cleanup(1)](/docs/podman-container-cleanup.1.md) | Cleanup Container storage and networks |
-| [podman-container-exists(1)](/docs/podman-container-exists.1.md) | Check if an container exists in local storage |
-| [podman-container-prune(1)](/docs/podman-container-prune.1.md) | Remove all stopped containers |
-| [podman-container-refresh(1)](/docs/podman-container-refresh.1.md) | Refresh all containers state in database |
-| [podman-container-restore(1)](/docs/podman-container-restore.1.md) | Restores one or more running containers |
-| [podman-container-runlabel(1)](/docs/podman-container-runlabel.1.md) | Execute Image Label Method |
-| [podman-cp(1)](/docs/podman-cp.1.md) | Copy files/folders between a container and the local filesystem |
-| [podman-create(1)](/docs/podman-create.1.md) | Create a new container |
-| [podman-diff(1)](/docs/podman-diff.1.md) | Inspect changes on a container or image's filesystem |
-| [podman-events(1)](/docs/podman-events.1.md) | Monitor Podman events |
-| [podman-exec(1)](/docs/podman-exec.1.md) | Execute a command in a running container |
-| [podman-export(1)](/docs/podman-export.1.md) | Export container's filesystem contents as a tar archive |
-| [podman-generate(1)](/docs/podman-generate.1.md) | Generate structured output based on Podman containers and pods |
-| [podman-generate-kube(1)](/docs/podman-generate-kube.1.md) | Generate Kubernetes YAML based on a container or Pod |
-| [podman-generate-systemd(1)](/docs/podman-generate-systemd.1.md) | Generate a Systemd unit file for a container |
-| [podman-history(1)](/docs/podman-history.1.md) | Shows the history of an image |
-| [podman-image(1)](/docs/podman-image.1.md) | Manage Images |
-| [podman-image-exists(1)](/docs/podman-image-exists.1.md) | Check if an image exists in local storage |
-| [podman-image-prune(1)](/docs/podman-image-prune.1.md) | Remove all unused images |
-| [podman-image-sign(1)](/docs/podman-image-sign.1.md) | Create a signature for an image |
-| [podman-image-trust(1)](/docs/podman-image-trust.1.md) | Manage container registry image trust policy |
-| [podman-images(1)](/docs/podman-images.1.md) | List images in local storage | [![...](/docs/play.png)](https://podman.io/asciinema/podman/images/) | [Here](https://github.com/containers/Demos/blob/master/podman_cli/podman_images.sh) |
-| [podman-import(1)](/docs/podman-import.1.md) | Import a tarball and save it as a filesystem image |
-| [podman-info(1)](/docs/podman-info.1.md) | Display system information |
-| [podman-init(1)](/docs/podman-init.1.md) | Initialize a container |
-| [podman-inspect(1)](/docs/podman-inspect.1.md) | Display the configuration of a container or image | [![...](/docs/play.png)](https://asciinema.org/a/133418) |
-| [podman-kill(1)](/docs/podman-kill.1.md) | Kill the main process in one or more running containers |
-| [podman-load(1)](/docs/podman-load.1.md) | Load an image from a container image archive |
-| [podman-login(1)](/docs/podman-login.1.md) | Login to a container registry |
-| [podman-logout(1)](/docs/podman-logout.1.md) | Logout of a container registry |
-| [podman-logs(1)](/docs/podman-logs.1.md) | Display the logs of a container |
-| [podman-mount(1)](/docs/podman-mount.1.md) | Mount a working container's root filesystem |
-| [podman-network(1)](/docs/podman-network.1.md) | Manage Podman CNI networks |
-| [podman-network-create(1)](/docs/podman-network-create.1.md) | Create a CNI network |
-| [podman-network-inspect(1)](/docs/podman-network-inspect.1.md) | Inspect one or more Podman networks |
-| [podman-network-ls(1)](/docs/podman-network-ls.1.md) | Display a summary of Podman networks |
-| [podman-network-rm(1)](/docs/podman-network-rm.1.md) | Remove one or more Podman networks |
-| [podman-pause(1)](/docs/podman-pause.1.md) | Pause one or more running containers | [![...](/docs/play.png)](https://podman.io/asciinema/podman/pause_unpause/) | [Here](https://github.com/containers/Demos/blob/master/podman_cli/podman_pause_unpause.sh) |
-| [podman-play(1)](/docs/podman-play.1.md) | Play pods and containers based on a structured input file |
-| [podman-pod(1)](/docs/podman-pod.1.md) | Simple management tool for groups of containers, called pods |
-| [podman-pod-create(1)](/docs/podman-pod-create.1.md) | Create a new pod |
-| [podman-pod-inspect(1)](/docs/podman-pod-inspect.1.md) | Inspect a pod |
+| [podman(1)](/docs/source/markdown/podman.1.md) | Simple management tool for pods and images |
+| [podman-attach(1)](/docs/source/markdown/podman-attach.1.md) | Attach to a running container |
+| [podman-build(1)](/docs/source/markdown/podman-build.1.md) | Build an image using instructions from Dockerfiles |
+| [podman-commit(1)](/docs/source/markdown/podman-commit.1.md) | Create new image based on the changed container |
+| [podman-container(1)](/docs/source/markdown/podman-container.1.md) | Manage Containers |
+| [podman-container-checkpoint(1)](/docs/source/markdown/podman-container-checkpoint.1.md) | Checkpoints one or more running containers |
+| [podman-container-cleanup(1)](/docs/source/markdown/podman-container-cleanup.1.md) | Cleanup Container storage and networks |
+| [podman-container-exists(1)](/docs/source/markdown/podman-container-exists.1.md) | Check if a container exists in local storage |
+| [podman-container-prune(1)](/docs/source/markdown/podman-container-prune.1.md) | Remove all stopped containers |
+| [podman-container-refresh(1)](/docs/source/markdown/podman-container-refresh.1.md) | Refresh all containers state in database |
+| [podman-container-restore(1)](/docs/source/markdown/podman-container-restore.1.md) | Restores one or more running containers |
+| [podman-container-runlabel(1)](/docs/source/markdown/podman-container-runlabel.1.md) | Execute Image Label Method |
+| [podman-cp(1)](/docs/source/markdown/podman-cp.1.md) | Copy files/folders between a container and the local filesystem |
+| [podman-create(1)](/docs/source/markdown/podman-create.1.md) | Create a new container |
+| [podman-diff(1)](/docs/source/markdown/podman-diff.1.md) | Inspect changes on a container or image's filesystem |
+| [podman-events(1)](/docs/source/markdown/podman-events.1.md) | Monitor Podman events |
+| [podman-exec(1)](/docs/source/markdown/podman-exec.1.md) | Execute a command in a running container |
+| [podman-export(1)](/docs/source/markdown/podman-export.1.md) | Export container's filesystem contents as a tar archive |
+| [podman-generate(1)](/docs/source/markdown/podman-generate.1.md) | Generate structured output based on Podman containers and pods |
+| [podman-generate-kube(1)](/docs/source/markdown/podman-generate-kube.1.md) | Generate Kubernetes YAML based on a container or Pod |
+| [podman-generate-systemd(1)](/docs/source/markdown/podman-generate-systemd.1.md) | Generate a Systemd unit file for a container |
+| [podman-history(1)](/docs/source/markdown/podman-history.1.md) | Shows the history of an image |
+| [podman-image(1)](/docs/source/markdown/podman-image.1.md) | Manage Images |
+| [podman-image-exists(1)](/docs/source/markdown/podman-image-exists.1.md) | Check if an image exists in local storage |
+| [podman-image-prune(1)](/docs/source/markdown/podman-image-prune.1.md) | Remove all unused images |
+| [podman-image-sign(1)](/docs/source/markdown/podman-image-sign.1.md) | Create a signature for an image |
+| [podman-image-trust(1)](/docs/source/markdown/podman-image-trust.1.md) | Manage container registry image trust policy |
+| [podman-images(1)](/docs/source/markdown/podman-images.1.md) | List images in local storage | [![...](/docs/source/markdown/play.png)](https://podman.io/asciinema/podman/images/) | [Here](https://github.com/containers/Demos/blob/master/podman_cli/podman_images.sh) |
+| [podman-import(1)](/docs/source/markdown/podman-import.1.md) | Import a tarball and save it as a filesystem image |
+| [podman-info(1)](/docs/source/markdown/podman-info.1.md) | Display system information |
+| [podman-init(1)](/docs/source/markdown/podman-init.1.md) | Initialize a container |
+| [podman-inspect(1)](/docs/source/markdown/podman-inspect.1.md) | Display the configuration of a container or image | [![...](/docs/source/markdown/play.png)](https://asciinema.org/a/133418) |
+| [podman-kill(1)](/docs/source/markdown/podman-kill.1.md) | Kill the main process in one or more running containers |
+| [podman-load(1)](/docs/source/markdown/podman-load.1.md) | Load an image from a container image archive |
+| [podman-login(1)](/docs/source/markdown/podman-login.1.md) | Login to a container registry |
+| [podman-logout(1)](/docs/source/markdown/podman-logout.1.md) | Logout of a container registry |
+| [podman-logs(1)](/docs/source/markdown/podman-logs.1.md) | Display the logs of a container |
+| [podman-mount(1)](/docs/source/markdown/podman-mount.1.md) | Mount a working container's root filesystem |
+| [podman-network(1)](/docs/source/markdown/podman-network.1.md) | Manage Podman CNI networks |
+| [podman-network-create(1)](/docs/source/markdown/podman-network-create.1.md) | Create a CNI network |
+| [podman-network-inspect(1)](/docs/source/markdown/podman-network-inspect.1.md) | Inspect one or more Podman networks |
+| [podman-network-ls(1)](/docs/source/markdown/podman-network-ls.1.md) | Display a summary of Podman networks |
+| [podman-network-rm(1)](/docs/source/markdown/podman-network-rm.1.md) | Remove one or more Podman networks |
+| [podman-pause(1)](/docs/source/markdown/podman-pause.1.md) | Pause one or more running containers | [![...](/docs/source/markdown/play.png)](https://podman.io/asciinema/podman/pause_unpause/) | [Here](https://github.com/containers/Demos/blob/master/podman_cli/podman_pause_unpause.sh) |
+| [podman-play(1)](/docs/source/markdown/podman-play.1.md) | Play pods and containers based on a structured input file |
+| [podman-pod(1)](/docs/source/markdown/podman-pod.1.md) | Simple management tool for groups of containers, called pods |
+| [podman-pod-create(1)](/docs/source/markdown/podman-pod-create.1.md) | Create a new pod |
+| [podman-pod-inspect(1)](/docs/source/markdown/podman-pod-inspect.1.md) | Inspect a pod |
| [podman-pod-kill(1)](podman-pod-kill.1.md) | Kill the main process of each container in pod. |
-| [podman-pod-ps(1)](/docs/podman-pod-ps.1.md) | List the pods on the system |
+| [podman-pod-ps(1)](/docs/source/markdown/podman-pod-ps.1.md) | List the pods on the system |
| [podman-pod-pause(1)](podman-pod-pause.1.md) | Pause one or more pods. |
-| [podman-pod-restart](/docs/podman-pod-restart.1.md) | Restart one or more pods |
-| [podman-pod-rm(1)](/docs/podman-pod-rm.1.md) | Remove one or more pods |
-| [podman-pod-start(1)](/docs/podman-pod-start.1.md) | Start one or more pods |
-| [podman-pod-stats(1)](/docs/podman-pod-stats.1.md) | Display a live stream of one or more pods' resource usage statistics | | |
-| [podman-pod-stop(1)](/docs/podman-pod-stop.1.md) | Stop one or more pods |
-| [podman-pod-top(1)](/docs/podman-pod-top.1.md) | Display the running processes of a pod |
+| [podman-pod-restart](/docs/source/markdown/podman-pod-restart.1.md) | Restart one or more pods |
+| [podman-pod-rm(1)](/docs/source/markdown/podman-pod-rm.1.md) | Remove one or more pods |
+| [podman-pod-start(1)](/docs/source/markdown/podman-pod-start.1.md) | Start one or more pods |
+| [podman-pod-stats(1)](/docs/source/markdown/podman-pod-stats.1.md) | Display a live stream of one or more pods' resource usage statistics | | |
+| [podman-pod-stop(1)](/docs/source/markdown/podman-pod-stop.1.md) | Stop one or more pods |
+| [podman-pod-top(1)](/docs/source/markdown/podman-pod-top.1.md) | Display the running processes of a pod |
| [podman-pod-unpause(1)](podman-pod-unpause.1.md) | Unpause one or more pods. |
-| [podman-port(1)](/docs/podman-port.1.md) | List port mappings for running containers |
-| [podman-ps(1)](/docs/podman-ps.1.md) | Prints out information about containers |
-| [podman-pull(1)](/docs/podman-pull.1.md) | Pull an image from a registry |
-| [podman-push(1)](/docs/podman-push.1.md) | Push an image to a specified destination | [![...](/docs/play.png)](https://asciinema.org/a/133276) |
-| [podman-restart](/docs/podman-restart.1.md) | Restarts one or more containers | [![...](/docs/play.png)](https://asciinema.org/a/jiqxJAxcVXw604xdzMLTkQvHM) |
-| [podman-rm(1)](/docs/podman-rm.1.md) | Removes one or more containers |
-| [podman-rmi(1)](/docs/podman-rmi.1.md) | Removes one or more images |
-| [podman-run(1)](/docs/podman-run.1.md) | Run a command in a container |
-| [podman-save(1)](/docs/podman-save.1.md) | Saves an image to an archive |
-| [podman-search(1)](/docs/podman-search.1.md) | Search a registry for an image |
-| [podman-start(1)](/docs/podman-start.1.md) | Starts one or more containers |
-| [podman-stats(1)](/docs/podman-stats.1.md) | Display a live stream of one or more containers' resource usage statistics |
-| [podman-stop(1)](/docs/podman-stop.1.md) | Stops one or more running containers |
-| [podman-system(1)](/docs/podman-system.1.md) | Manage podman |
-| [podman-tag(1)](/docs/podman-tag.1.md) | Add an additional name to a local image | [![...](/docs/play.png)](https://asciinema.org/a/133803) |
-| [podman-top(1)](/docs/podman-top.1.md) | Display the running processes of a container |
-| [podman-umount(1)](/docs/podman-umount.1.md) | Unmount a working container's root filesystem |
-| [podman-unpause(1)](/docs/podman-unpause.1.md) | Unpause one or more running containers | [![...](/docs/play.png)](https://podman.io/asciinema/podman/pause_unpause/) | [Here](https://github.com/containers/Demos/blob/master/podman_cli/podman_pause_unpause.sh) |
-| [podman-unshare(1)](/docs/podman-unshare.1.md) | Run a command inside of a modified user namespace. |
-| [podman-varlink(1)](/docs/podman-varlink.1.md) | Run the varlink backend |
-| [podman-version(1)](/docs/podman-version.1.md) | Display the version information |
-| [podman-volume(1)](/docs/podman-volume.1.md) | Manage Volumes |
-| [podman-volume-create(1)](/docs/podman-volume-create.1.md) | Create a volume |
-| [podman-volume-inspect(1)](/docs/podman-volume-inspect.1.md) | Get detailed information on one or more volumes |
-| [podman-volume-ls(1)](/docs/podman-volume-ls.1.md) | List all the available volumes |
-| [podman-volume-rm(1)](/docs/podman-volume-rm.1.md) | Remove one or more volumes |
-| [podman-volume-prune(1)](/docs/podman-volume-prune.1.md) | Remove all unused volumes |
-| [podman-wait(1)](/docs/podman-wait.1.md) | Wait on one or more containers to stop and print their exit codes |
+| [podman-port(1)](/docs/source/markdown/podman-port.1.md) | List port mappings for running containers |
+| [podman-ps(1)](/docs/source/markdown/podman-ps.1.md) | Prints out information about containers |
+| [podman-pull(1)](/docs/source/markdown/podman-pull.1.md) | Pull an image from a registry |
+| [podman-push(1)](/docs/source/markdown/podman-push.1.md) | Push an image to a specified destination | [![...](/docs/source/markdown/play.png)](https://asciinema.org/a/133276) |
+| [podman-restart](/docs/source/markdown/podman-restart.1.md) | Restarts one or more containers | [![...](/docs/source/markdown/play.png)](https://asciinema.org/a/jiqxJAxcVXw604xdzMLTkQvHM) |
+| [podman-rm(1)](/docs/source/markdown/podman-rm.1.md) | Removes one or more containers |
+| [podman-rmi(1)](/docs/source/markdown/podman-rmi.1.md) | Removes one or more images |
+| [podman-run(1)](/docs/source/markdown/podman-run.1.md) | Run a command in a container |
+| [podman-save(1)](/docs/source/markdown/podman-save.1.md) | Saves an image to an archive |
+| [podman-search(1)](/docs/source/markdown/podman-search.1.md) | Search a registry for an image |
+| [podman-start(1)](/docs/source/markdown/podman-start.1.md) | Starts one or more containers |
+| [podman-stats(1)](/docs/source/markdown/podman-stats.1.md) | Display a live stream of one or more containers' resource usage statistics |
+| [podman-stop(1)](/docs/source/markdown/podman-stop.1.md) | Stops one or more running containers |
+| [podman-system(1)](/docs/source/markdown/podman-system.1.md) | Manage podman |
+| [podman-system-df(1)](/docs/source/markdown/podman-system-df.1.md) | Show podman disk usage. |
+| [podman-system-info(1)](/docs/source/markdown/podman-info.1.md) | Displays Podman related system information. |
+| [podman-system-migrate(1)](/docs/source/markdown/podman-system-migrate.1.md) | Migrate existing containers to a new podman version. |
+| [podman-system-prune(1)](/docs/source/markdown/podman-system-prune.1.md) | Remove all unused container, image and volume data. |
+| [podman-system-renumber(1)](/docs/source/markdown/podman-system-renumber.1.md) | Migrate lock numbers to handle a change in maximum number of locks. |
+| [podman-system-reset(1)](/docs/source/markdown/podman-system-reset.1.md) | Reset storage back to original state. Remove all pods, containers, images, volumes. |
+| [podman-tag(1)](/docs/source/markdown/podman-tag.1.md) | Add an additional name to a local image | [![...](/docs/source/markdown/play.png)](https://asciinema.org/a/133803) |
+| [podman-top(1)](/docs/source/markdown/podman-top.1.md) | Display the running processes of a container |
+| [podman-umount(1)](/docs/source/markdown/podman-umount.1.md) | Unmount a working container's root filesystem |
+| [podman-unpause(1)](/docs/source/markdown/podman-unpause.1.md) | Unpause one or more running containers | [![...](/docs/source/markdown/play.png)](https://podman.io/asciinema/podman/pause_unpause/) | [Here](https://github.com/containers/Demos/blob/master/podman_cli/podman_pause_unpause.sh) |
+| [podman-unshare(1)](/docs/source/markdown/podman-unshare.1.md) | Run a command inside of a modified user namespace. |
+| [podman-varlink(1)](/docs/source/markdown/podman-varlink.1.md) | Run the varlink backend |
+| [podman-version(1)](/docs/source/markdown/podman-version.1.md) | Display the version information |
+| [podman-volume(1)](/docs/source/markdown/podman-volume.1.md) | Manage Volumes |
+| [podman-volume-create(1)](/docs/source/markdown/podman-volume-create.1.md) | Create a volume |
+| [podman-volume-inspect(1)](/docs/source/markdown/podman-volume-inspect.1.md) | Get detailed information on one or more volumes |
+| [podman-volume-ls(1)](/docs/source/markdown/podman-volume-ls.1.md) | List all the available volumes |
+| [podman-volume-rm(1)](/docs/source/markdown/podman-volume-rm.1.md) | Remove one or more volumes |
+| [podman-volume-prune(1)](/docs/source/markdown/podman-volume-prune.1.md) | Remove all unused volumes |
+| [podman-wait(1)](/docs/source/markdown/podman-wait.1.md) | Wait on one or more containers to stop and print their exit codes |
diff --git a/completions/bash/podman b/completions/bash/podman
index 379c33711..f37b45837 100644
--- a/completions/bash/podman
+++ b/completions/bash/podman
@@ -1143,6 +1143,21 @@ _podman_container() {
esac
}
+_podman_system_reset() {
+ local options_with_args="
+ "
+ local boolean_options="
+ -h
+ --help
+ --force
+ "
+ case "$cur" in
+ -*)
+ COMPREPLY=($(compgen -W "$boolean_options $options_with_args" -- "$cur"))
+ ;;
+ esac
+}
+
_podman_system_df() {
local options_with_args="
--format
@@ -1194,6 +1209,7 @@ _podman_system() {
df
info
prune
+ reset
"
__podman_subcommands "$subcommands" && return
@@ -1564,6 +1580,7 @@ _podman_images() {
--filter
-h
--help
+ --history
--no-trunc
--notruncate
-n
@@ -2151,10 +2168,13 @@ _podman_rm() {
local boolean_options="
--all
-a
+ --cidfile
--force
-f
--help
-h
+ --ignore
+ -i
--latest
-l
--storage
@@ -2427,8 +2447,11 @@ _podman_stop() {
local boolean_options="
--all
-a
+ --cidfile
-h
--help
+ --ignore
+ -i
--latest
-l
"
@@ -2810,9 +2833,12 @@ _podman_images_prune() {
_podman_container_prune() {
local options_with_args="
+ --filter
"
local boolean_options="
+ -f
+ --force
-h
--help
"
@@ -2989,6 +3015,8 @@ _podman_pod_rm() {
--all
--help
-h
+ --ignore
+ -i
-f
--force
--latest
@@ -3039,6 +3067,8 @@ _podman_pod_stop() {
-a
--cleanup
--help
+ --ignore
+ -i
-h
--latest
-l
diff --git a/contrib/build_rpm.sh b/contrib/build_rpm.sh
index 1132ef380..e41763fa7 100644..100755
--- a/contrib/build_rpm.sh
+++ b/contrib/build_rpm.sh
@@ -1,15 +1,12 @@
#!/bin/bash
-set -x
-
-pkg_manager=`command -v dnf`
-if [ -z "$pkg_manager" ]; then
- pkg_manager=`command -v yum`
-fi
+set -euxo pipefail
+# returned path can vary: /usr/bin/dnf /bin/dnf ...
+pkg_manager=`command -v dnf yum | head -n1`
echo "Package manager binary: $pkg_manager"
-if [ $pkg_manager == "/usr/bin/yum" ]; then
+if [[ $pkg_manager == *yum ]]; then
echo "[virt7-container-common-candidate]
name=virt7-container-common-candidate
baseurl=https://cbs.centos.org/repos/virt7-container-common-candidate/x86_64/os/
@@ -22,35 +19,39 @@ declare -a PKGS=(device-mapper-devel \
glib2-devel \
glibc-static \
golang \
- golang-github-cpuguy83-go-md2man \
gpgme-devel \
libassuan-devel \
libseccomp-devel \
libselinux-devel \
make \
- golang-github-cpuguy83-go-md2man \
rpm-build \
- btrfs-progs-devel \
go-compilers-golang-compiler \
)
-if [ $pkg_manager == "/usr/bin/dnf" ]; then
+if [[ $pkg_manager == *dnf ]]; then
PKGS+=(python3-devel \
python3-varlink \
)
+ # btrfs-progs-devel is not available in CentOS/RHEL-8
+ if ! grep -i -q 'Red Hat\|CentOS' /etc/redhat-release; then
+ PKGS+=(btrfs-progs-devel)
+ fi
+ # disable doc until go-md2man rpm becomes available
+ # disable debug to avoid error: Empty %files file ~/rpmbuild/BUILD/libpod-.../debugsourcefiles.list
+ export extra_arg="--without doc --without debug"
+else
+ if ! grep -i -q 'Red Hat\|CentOS' /etc/redhat-release; then
+ PKGS+=(golang-github-cpuguy83-go-md2man)
+ fi
fi
echo ${PKGS[*]}
-$pkg_manager install -y ${PKGS[*]}
+sudo $pkg_manager install -y ${PKGS[*]}
make -f .copr/Makefile
-rpmbuild --rebuild podman-*.src.rpm
-
-# Test to make sure the install of the binary works
-$pkg_manager -y install ~/rpmbuild/RPMS/x86_64/podman-*.x86_64.rpm
-
-
-# If we built python/varlink packages, we should test their installs too
-if [ $pkg_manager == "/usr/bin/dnf" ]; then
- $pkg_manager -y install ~/rpmbuild/RPMS/noarch/python*
+# workaround for https://github.com/containers/libpod/issues/4627
+if [ -d ~/rpmbuild/BUILD ]; then
+ chmod -R +w ~/rpmbuild/BUILD
fi
+
+rpmbuild --rebuild ${extra_arg:-} podman-*.src.rpm
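The rewritten lookup relies on `command -v` resolving several names in a single call. A quick sketch of the shell behavior it depends on (output path illustrative):

```
# command -v prints one resolved path per name it can find, in argument
# order, so dnf wins over yum whenever both are installed.
$ command -v dnf yum | head -n1
/usr/bin/dnf
```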
diff --git a/contrib/spec/podman.spec.in b/contrib/spec/podman.spec.in
index 33ecc8eba..4a3704811 100644
--- a/contrib/spec/podman.spec.in
+++ b/contrib/spec/podman.spec.in
@@ -1,8 +1,9 @@
%global with_devel 0
%global with_bundled 1
-%global with_debug 1
%global with_check 0
%global with_unit_test 0
+%bcond_without doc
+%bcond_without debug
%if 0%{?fedora} >= 28
%bcond_without varlink
@@ -10,7 +11,7 @@
%bcond_with varlink
%endif
-%if 0%{?with_debug}
+%if %{with debug}
%global _find_debuginfo_dwz_opts %{nil}
%global _dwz_low_mem_die_limit 0
%else
@@ -52,12 +53,17 @@ ExclusiveArch: aarch64 %{arm} ppc64le s390x x86_64
# The COPR process will uncomment this
#BuildRequires: golang-bin
#
+# btrfs-progs-devel package is not available in CentOS/RHEL-8
+%if 0%{?rhel} != 8 && 0%{?centos} != 8
BuildRequires: btrfs-progs-devel
+%endif
BuildRequires: glib2-devel
BuildRequires: glibc-devel
BuildRequires: glibc-static
BuildRequires: git
+%if %{with doc}
BuildRequires: go-md2man
+%endif
BuildRequires: gpgme-devel
BuildRequires: libassuan-devel
BuildRequires: libgpg-error-devel
@@ -349,6 +355,15 @@ This package contains unit tests for project
providing packages with %{import_path} prefix.
%endif
+%if %{with doc}
+%package manpages
+Summary: Man pages for the %{name} commands
+BuildArch: noarch
+
+%description manpages
+Man pages for the %{name} commands
+%endif
+
%prep
%autosetup -Sgit -n %{repo}-%{shortcommit0}
@@ -357,7 +372,9 @@ tar zxf %{SOURCE1}
sed -i 's/install.remote: podman-remote/install.remote:/' Makefile
sed -i 's/install.bin: podman/install.bin:/' Makefile
+%if %{with doc}
sed -i 's/install.man: docs/install.man:/' Makefile
+%endif
%build
mkdir _build
@@ -370,8 +387,12 @@ export GOPATH=$(pwd)/_build:$(pwd):$(pwd):%{gopath}
export BUILDTAGS="varlink selinux seccomp $(hack/btrfs_installed_tag.sh) $(hack/btrfs_tag.sh) $(hack/libdm_tag.sh) exclude_graphdriver_devicemapper"
GOPATH=$GOPATH go generate ./cmd/podman/varlink/...
-BUILDTAGS=$BUILDTAGS make binaries docs
+%if %{with doc}
+BUILDTAGS=$BUILDTAGS make binaries docs
+%else
+BUILDTAGS=$BUILDTAGS make binaries
+%endif
# build conmon
pushd conmon
@@ -388,6 +409,7 @@ popd
%install
install -dp %{buildroot}%{_unitdir}
install -dp %{buildroot}%{_usr}/lib/systemd/user
+%if %{with doc}
PODMAN_VERSION=%{version} %{__make} PREFIX=%{buildroot}%{_prefix} ETCDIR=%{buildroot}%{_sysconfdir} \
install.bin \
install.remote \
@@ -395,6 +417,14 @@ PODMAN_VERSION=%{version} %{__make} PREFIX=%{buildroot}%{_prefix} ETCDIR=%{build
install.cni \
install.systemd \
install.completions
+%else
+PODMAN_VERSION=%{version} %{__make} PREFIX=%{buildroot}%{_prefix} ETCDIR=%{buildroot}%{_sysconfdir} \
+ install.bin \
+ install.remote \
+ install.cni \
+ install.systemd \
+ install.completions
+%endif
mv pkg/hooks/README.md pkg/hooks/README-hooks.md
@@ -477,8 +507,6 @@ export GOPATH=%{buildroot}/%{gopath}:$(pwd)/vendor:%{gopath}
%license LICENSE
%doc README.md CONTRIBUTING.md pkg/hooks/README-hooks.md install.md code-of-conduct.md transfer.md
%{_bindir}/%{name}
-%{_mandir}/man1/*.1*
-%{_mandir}/man5/*.5*
%{_datadir}/bash-completion/completions/*
%{_datadir}/zsh/site-functions/*
%{_libexecdir}/%{name}/conmon
@@ -508,6 +536,12 @@ export GOPATH=%{buildroot}/%{gopath}:$(pwd)/vendor:%{gopath}
%doc README.md CONTRIBUTING.md pkg/hooks/README-hooks.md install.md code-of-conduct.md transfer.md
%{_bindir}/%{name}-remote
+%if %{with doc}
+%files manpages
+%{_mandir}/man1/*.1*
+%{_mandir}/man5/*.5*
+%endif
+
%changelog
* Sat Aug 4 2018 Dan Walsh <dwalsh@redhat.com> - 0.8.1-1.git6b4ab2a
- Bump to v0.8.1
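The two `%bcond_without` declarations make `doc` and `debug` opt-out build conditionals: they default to enabled and can be switched off per build. A sketch of the resulting invocations (standard RPM bcond semantics):

```
# Default build: both conditionals on, man pages and debuginfo included.
rpmbuild --rebuild podman-*.src.rpm

# Opt out, as contrib/build_rpm.sh now does on dnf-based systems.
rpmbuild --rebuild --without doc --without debug podman-*.src.rpm
```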
diff --git a/docs/source/markdown/podman-container-prune.1.md b/docs/source/markdown/podman-container-prune.1.md
index d8a4b7f4e..856843a80 100644
--- a/docs/source/markdown/podman-container-prune.1.md
+++ b/docs/source/markdown/podman-container-prune.1.md
@@ -20,6 +20,8 @@ Print usage statement
Remove all stopped containers from local storage
```
$ sudo podman container prune
+WARNING! This will remove all stopped containers.
+Are you sure you want to continue? [y/N] y
878392adf2e6c5c9bb1fc19b69d37d2e98c8abf9d539c0bce4b15b46bbcce471
37664467fbe3618bf9479c34393ac29c02696675addf1750f9e346581636cde7
ed0c6468b8e1cb641b4621d1fe30cb477e1fefc5c0bceb66feaf2f7cb50e5962
@@ -28,6 +30,26 @@ fff1c5b6c3631746055ec40598ce8ecaa4b82aef122f9e3a85b03b55c0d06c23
602d343cd47e7cb3dfc808282a9900a3e4555747787ec6723bb68cedab8384d5
```
+Remove all stopped containers from local storage without confirmation.
+```
+$ sudo podman container prune -f
+878392adf2e6c5c9bb1fc19b69d37d2e98c8abf9d539c0bce4b15b46bbcce471
+37664467fbe3618bf9479c34393ac29c02696675addf1750f9e346581636cde7
+ed0c6468b8e1cb641b4621d1fe30cb477e1fefc5c0bceb66feaf2f7cb50e5962
+6ac6c8f0067b7a4682e6b8e18902665b57d1a0e07e885d9abcd382232a543ccd
+fff1c5b6c3631746055ec40598ce8ecaa4b82aef122f9e3a85b03b55c0d06c23
+602d343cd47e7cb3dfc808282a9900a3e4555747787ec6723bb68cedab8384d5
+
+```
+
+Remove all stopped containers from local storage created within the last 10 minutes
+```
+$ sudo podman container prune --filter until="10m"
+WARNING! This will remove all stopped containers.
+Are you sure you want to continue? [y/N] y
+3d366295e33d8cc612c4d873199bacadd55088d90d17dcafaa9a2d317ad50b4e
+```
+
## SEE ALSO
podman(1), podman-ps
diff --git a/docs/source/markdown/podman-create.1.md b/docs/source/markdown/podman-create.1.md
index 0ddcc7ed8..82d2e8f6a 100644
--- a/docs/source/markdown/podman-create.1.md
+++ b/docs/source/markdown/podman-create.1.md
@@ -817,6 +817,10 @@ container. The `OPTIONS` are a comma delimited list and can be:
* [rw|ro]
* [z|Z]
* [`[r]shared`|`[r]slave`|`[r]private`]
+* [`[r]bind`]
+* [`noexec`|`exec`]
+* [`nodev`|`dev`]
+* [`nosuid`|`suid`]
The `CONTAINER-DIR` must be an absolute path such as `/src/docs`. The volume
will be mounted into the container at this directory.
@@ -870,6 +874,25 @@ where source dir is mounted on) has to have right propagation properties. For
shared volumes, source mount point has to be shared. And for slave volumes,
source mount has to be either shared or slave.
+If you want to recursively mount a volume and all of its submounts into a
+container, then you can use the `rbind` option. By default the bind option is
+used, and submounts of the source directory will not be mounted into the
+container.
+
+Mounting the volume with the `nosuid` option means that SUID applications on
+the volume will not be able to change their privilege. By default volumes
+are mounted with `nosuid`.
+
+Mounting the volume with the `noexec` option means that no executables on the
+volume will be able to be executed within the container.
+
+Mounting the volume with the `nodev` option means that no devices on the volume
+will be able to be used by processes within the container. By default volumes
+are mounted with `nodev`.
+
+If the <source-dir> is a mount point, then the "dev", "suid", and "exec" options
+are ignored by the kernel.
+
Use `df <source-dir>` to figure out the source mount and then use
`findmnt -o TARGET,PROPAGATION <source-mount-dir>` to figure out propagation
properties of source mount. If `findmnt` utility is not available, then one
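Taken together, the new volume options can be combined in a single `-v` spec. A minimal sketch (image name and paths are illustrative):

```
# Mount ./content read-only, with the hardening flags spelled out, and
# recursively bind any submounts of the source directory.
podman create -v "$PWD/content:/srv/content:ro,nosuid,nodev,noexec,rbind" \
    registry.fedoraproject.org/fedora sleep 1000
```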
diff --git a/docs/source/markdown/podman-image-prune.1.md b/docs/source/markdown/podman-image-prune.1.md
index b844a9f63..0155ebcd1 100644
--- a/docs/source/markdown/podman-image-prune.1.md
+++ b/docs/source/markdown/podman-image-prune.1.md
@@ -25,13 +25,16 @@ Print usage statement
Remove all dangling images from local storage
```
$ sudo podman image prune
+
+WARNING! This will remove all dangling images.
+Are you sure you want to continue? [y/N] y
f3e20dc537fb04cb51672a5cb6fdf2292e61d411315549391a0d1f64e4e3097e
324a7a3b2e0135f4226ffdd473e4099fd9e477a74230cdc35de69e84c0f9d907
```
-Remove all unused images from local storage
+Remove all unused images from local storage without confirmation
```
-$ sudo podman image prune -a
+$ sudo podman image prune -a -f
f3e20dc537fb04cb51672a5cb6fdf2292e61d411315549391a0d1f64e4e3097e
324a7a3b2e0135f4226ffdd473e4099fd9e477a74230cdc35de69e84c0f9d907
6125002719feb1ddf3030acab1df6156da7ce0e78e571e9b6e9c250424d6220c
@@ -41,6 +44,40 @@ e4e5109420323221f170627c138817770fb64832da7d8fe2babd863148287fca
```
+Remove all unused images from local storage created before the given timestamp or duration.
+```
+$ sudo podman image prune -a --filter until=2019-11-14T06:15:42.937792374Z
+
+WARNING! This will remove all dangling images.
+Are you sure you want to continue? [y/N] y
+e813d2135f17fadeffeea8159a34cfdd4c30b98d8111364b913a91fd930643e9
+5e6572320437022e2746467ddf5b3561bf06e099e8e6361df27e0b2a7ed0b17b
+58fda2abf5042b35dfe04e5f8ee458a3cc26375bf309efb42c078b551a2055c7
+6d2bd30fe924d3414b64bd3920760617e6ced872364bc3bc6959a623252da002
+33d1c829be64a1e1d379caf4feec1f05a892c3ef7aa82c0be53d3c08a96c59c5
+f9f0a8a58c9e02a2b3250b88cc5c95b1e10245ca2c4161d19376580aaa90f55c
+1ef14d5ede80db78978b25ad677fd3e897a578c3af614e1fda608d40c8809707
+45e1482040e441a521953a6da2eca9bafc769e15667a07c23720d6e0cafc3ab2
+
+$ sudo podman image prune -f --filter until=10h
+f3e20dc537fb04cb51672a5cb6fdf2292e61d411315549391a0d1f64e4e3097e
+324a7a3b2e0135f4226ffdd473e4099fd9e477a74230cdc35de69e84c0f9d907
+```
+
+Remove all unused images from local storage with label version 1.0
+```
+$ sudo podman image prune -a -f --filter label=version=1.0
+e813d2135f17fadeffeea8159a34cfdd4c30b98d8111364b913a91fd930643e9
+5e6572320437022e2746467ddf5b3561bf06e099e8e6361df27e0b2a7ed0b17b
+58fda2abf5042b35dfe04e5f8ee458a3cc26375bf309efb42c078b551a2055c7
+6d2bd30fe924d3414b64bd3920760617e6ced872364bc3bc6959a623252da002
+33d1c829be64a1e1d379caf4feec1f05a892c3ef7aa82c0be53d3c08a96c59c5
+f9f0a8a58c9e02a2b3250b88cc5c95b1e10245ca2c4161d19376580aaa90f55c
+1ef14d5ede80db78978b25ad677fd3e897a578c3af614e1fda608d40c8809707
+45e1482040e441a521953a6da2eca9bafc769e15667a07c23720d6e0cafc3ab2
+
+```
+
## SEE ALSO
podman(1), podman-images
diff --git a/docs/source/markdown/podman-images.1.md b/docs/source/markdown/podman-images.1.md
index 3ac07fc43..21fca1dbd 100644
--- a/docs/source/markdown/podman-images.1.md
+++ b/docs/source/markdown/podman-images.1.md
@@ -52,6 +52,10 @@ Filter output based on conditions provided
Change the default output format. This can be of a supported type like 'json'
or a Go template.
+**--history**
+
+Display the history of image names. If an image gets re-tagged or untagged, the image name history gets prepended (latest image first). This is especially useful when undoing a tag operation, or when an image no longer has a name because it has been untagged.
+
**--noheading**, **-n**
Omit the table headings from the listing of images.
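A short sketch of the new flag in action (tag names are illustrative; the exact column layout is not shown here):

```
# Re-tag an image, then list images with their name history so the old
# name still shows up even after the tag moves.
podman tag alpine:latest alpine:archived
podman images --history
```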
diff --git a/docs/source/markdown/podman-pod-prune.1.md b/docs/source/markdown/podman-pod-prune.1.md
index f79961b2f..478f563c3 100644
--- a/docs/source/markdown/podman-pod-prune.1.md
+++ b/docs/source/markdown/podman-pod-prune.1.md
@@ -1,16 +1,21 @@
% podman-pod-prune(1)
## NAME
-podman-pod-prune - Remove all stopped pods
+podman-pod-prune - Remove all stopped pods and their containers
## SYNOPSIS
**podman pod prune**
## DESCRIPTION
-**podman pod prune** removes all stopped pods from local storage.
+**podman pod prune** removes all stopped pods and their containers from local storage.
+
+## OPTIONS
+
+**--force**, **-f**
+
+Force removal of all running pods and their containers. The default is false.
## EXAMPLES
-Remove all stopped pods from local storage
+Remove all stopped pods and their containers from local storage
```
$ sudo podman pod prune
22b8813332948064b6566370088c5e0230eeaf15a58b1c5646859fd9fc364fe7
diff --git a/docs/source/markdown/podman-pod-rm.1.md b/docs/source/markdown/podman-pod-rm.1.md
index 6659534b4..14da2071f 100644
--- a/docs/source/markdown/podman-pod-rm.1.md
+++ b/docs/source/markdown/podman-pod-rm.1.md
@@ -1,13 +1,13 @@
% podman-pod-rm(1)
## NAME
-podman\-pod\-rm - Remove one or more pods
+podman\-pod\-rm - Remove one or more stopped pods and containers
## SYNOPSIS
**podman pod rm** [*options*] *pod*
## DESCRIPTION
-**podman pod rm** will remove one or more pods from the host. The pod name or ID can be used. The \-f option stops all containers and then removes them before removing the pod. Without the \-f option, a pod cannot be removed if it has associated containers.
+**podman pod rm** will remove one or more stopped pods and their containers from the host. The pod name or ID can be used. The \-f option stops all containers and then removes them before removing the pod.
## OPTIONS
@@ -15,6 +15,12 @@ podman\-pod\-rm - Remove one or more pods
Remove all pods. Can be used in conjunction with \-f as well.
+**--ignore**, **-i**
+
+Ignore errors when specified pods are not in the container store. A user might
+have decided to manually remove a pod, which would lead to a failure during the
+ExecStop directive of a systemd service referencing that pod.
+
**--latest**, **-l**
Instead of providing the pod name or ID, remove the last created pod.
diff --git a/docs/source/markdown/podman-pod-stop.1.md b/docs/source/markdown/podman-pod-stop.1.md
index b3ce47d72..73c347cec 100644
--- a/docs/source/markdown/podman-pod-stop.1.md
+++ b/docs/source/markdown/podman-pod-stop.1.md
@@ -15,6 +15,12 @@ Stop containers in one or more pods. You may use pod IDs or names as input.
Stops all pods
+**--ignore**, **-i**
+
+Ignore errors when specified pods are not in the container store. A user might
+have decided to manually remove a pod, which would lead to a failure during the
+ExecStop directive of a systemd service referencing that pod.
+
**--latest**, **-l**
Instead of providing the pod name or ID, stop the last created pod.
diff --git a/docs/source/markdown/podman-pod.1.md b/docs/source/markdown/podman-pod.1.md
index b3d002a06..e5a8207e9 100644
--- a/docs/source/markdown/podman-pod.1.md
+++ b/docs/source/markdown/podman-pod.1.md
@@ -18,10 +18,10 @@ podman pod is a set of subcommands that manage pods, or groups of containers.
| inspect | [podman-pod-inspect(1)](podman-pod-inspect.1.md) | Displays information describing a pod. |
| kill | [podman-pod-kill(1)](podman-pod-kill.1.md) | Kill the main process of each container in one or more pods. |
| pause | [podman-pod-pause(1)](podman-pod-pause.1.md) | Pause one or more pods. |
-| prune | [podman-pod-prune(1)](podman-pod-prune.1.md) | Remove all stopped pods. |
+| prune | [podman-pod-prune(1)](podman-pod-prune.1.md) | Remove all stopped pods and their containers. |
| ps | [podman-pod-ps(1)](podman-pod-ps.1.md) | Prints out information about pods. |
| restart | [podman-pod-restart(1)](podman-pod-restart.1.md) | Restart one or more pods. |
-| rm | [podman-pod-rm(1)](podman-pod-rm.1.md) | Remove one or more pods. |
+| rm | [podman-pod-rm(1)](podman-pod-rm.1.md) | Remove one or more stopped pods and containers. |
| start | [podman-pod-start(1)](podman-pod-start.1.md) | Start one or more pods. |
| stats | [podman-pod-stats(1)](podman-pod-stats.1.md) | Display a live stream of resource usage stats for containers in one or more pods. |
| stop | [podman-pod-stop(1)](podman-pod-stop.1.md) | Stop one or more pods. |
diff --git a/docs/source/markdown/podman-rm.1.md b/docs/source/markdown/podman-rm.1.md
index c1663129c..782feac6f 100644
--- a/docs/source/markdown/podman-rm.1.md
+++ b/docs/source/markdown/podman-rm.1.md
@@ -18,6 +18,10 @@ Running or unusable containers will not be removed without the `-f` option.
Remove all containers. Can be used in conjunction with -f as well.
+**--cidfile**
+
+Read container ID from the specified file and remove the container. Can be specified multiple times.
+
**--force**, **-f**
Force the removal of running and paused containers. Forcing a container removal also
@@ -26,6 +30,12 @@ Containers could have been created by a different container engine.
In addition, forcing can be used to remove unusable containers, e.g. containers
whose OCI runtime has become unavailable.
+**--ignore**, **-i**
+
+Ignore errors when specified containers are not in the container store. A user
+might have decided to manually remove a container, which would lead to a failure
+during the ExecStop directive of a systemd service referencing that container.
+
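The ExecStop scenario this option targets looks roughly like the following. A hypothetical unit fragment (service and container names are made up):

```
# mywebserver.service -- fragment only; --ignore keeps ExecStop/ExecStopPost
# from failing if the container was already removed by hand.
[Service]
ExecStart=/usr/bin/podman start mywebserver
ExecStop=/usr/bin/podman stop --ignore mywebserver
ExecStopPost=/usr/bin/podman rm --ignore --force mywebserver
```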
**--latest**, **-l**
Instead of providing the container name or ID, use the last created container. If you use methods other than Podman
@@ -50,11 +60,17 @@ Remove a container by its name *mywebserver*
```
podman rm mywebserver
```
+
Remove several containers by name and container id.
```
podman rm mywebserver myflaskserver 860a4b23
```
+Remove several containers reading their IDs from files.
+```
+podman rm --cidfile ./cidfile-1 --cidfile /home/user/cidfile-2
+```
+
Forcibly remove a container by container ID.
```
podman rm -f 860a4b23
diff --git a/docs/source/markdown/podman-run.1.md b/docs/source/markdown/podman-run.1.md
index d487af235..e1177cb34 100644
--- a/docs/source/markdown/podman-run.1.md
+++ b/docs/source/markdown/podman-run.1.md
@@ -860,6 +860,10 @@ create one.
* [`rw`|`ro`]
* [`z`|`Z`]
* [`[r]shared`|`[r]slave`|`[r]private`]
+* [`[r]bind`]
+* [`noexec`|`exec`]
+* [`nodev`|`dev`]
+* [`nosuid`|`suid`]
The `CONTAINER-DIR` must be an absolute path such as `/src/docs`. The volume
will be mounted into the container at this directory.
@@ -913,6 +917,25 @@ where source dir is mounted on) has to have right propagation properties. For
shared volumes, source mount point has to be shared. And for slave volumes,
source mount has to be either shared or slave.
+If you want to recursively mount a volume and all of its submounts into a
+container, then you can use the `rbind` option. By default the bind option is
+used, and submounts of the source directory will not be mounted into the
+container.
+
+Mounting the volume with the `nosuid` option means that SUID applications on
+the volume will not be able to change their privilege. By default volumes
+are mounted with `nosuid`.
+
+Mounting the volume with the `noexec` option means that no executables on the
+volume will be able to be executed within the container.
+
+Mounting the volume with the `nodev` option means that no devices on the volume
+will be able to be used by processes within the container. By default volumes
+are mounted with `nodev`.
+
+If the <source-dir> is a mount point, then the "dev", "suid", and "exec" options
+are ignored by the kernel.
+
Use `df <source-dir>` to figure out the source mount and then use
`findmnt -o TARGET,PROPAGATION <source-mount-dir>` to figure out propagation
properties of source mount. If `findmnt` utility is not available, then one
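As with `podman create`, the options combine in a single `-v` spec, and `findmnt` can confirm that submounts came along. A sketch (paths and image are illustrative; `findmnt` is assumed to be present in the image):

```
# rbind pulls in submounts of /mnt/data; the default bind would not.
podman run --rm -v /mnt/data:/data:ro,rbind \
    registry.fedoraproject.org/fedora findmnt -R /data
```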
diff --git a/docs/source/markdown/podman-stop.1.md b/docs/source/markdown/podman-stop.1.md
index b5ea670b0..3b5f17057 100644
--- a/docs/source/markdown/podman-stop.1.md
+++ b/docs/source/markdown/podman-stop.1.md
@@ -21,6 +21,16 @@ container and also via command line when creating the container.
Stop all running containers. This does not include paused containers.
+**--cidfile**
+
+Read container ID from the specified file and stop the container. Can be specified multiple times.
+
+**--ignore**, **-i**
+
+Ignore errors when specified containers are not in the container store. A user
+might have decided to manually remove a container, which would lead to a failure
+during the ExecStop directive of a systemd service referencing that container.
+
**--latest**, **-l**
Instead of providing the container name or ID, use the last created container. If you use methods other than Podman
@@ -40,6 +50,10 @@ podman stop 860a4b235279
podman stop mywebserver 860a4b235279
+podman stop --cidfile /home/user/cidfile-1
+
+podman stop --cidfile /home/user/cidfile-1 --cidfile ./cidfile-2
+
podman stop --timeout 2 860a4b235279
podman stop -a
diff --git a/docs/source/markdown/podman-system-reset.1.md b/docs/source/markdown/podman-system-reset.1.md
new file mode 100644
index 000000000..432f275f4
--- /dev/null
+++ b/docs/source/markdown/podman-system-reset.1.md
@@ -0,0 +1,25 @@
+% podman-system-reset(1)
+
+## NAME
+podman\-system\-reset - Reset storage back to initial state
+
+## SYNOPSIS
+**podman system reset**
+
+## DESCRIPTION
+**podman system reset** removes all pods, containers, images and volumes.
+
+## OPTIONS
+**--force**, **-f**
+
+Do not prompt for confirmation
+
+**--help**, **-h**
+
+Print usage statement
+
+## SEE ALSO
+`podman(1)`, `podman-system(1)`
+
+## HISTORY
+November 2019, Originally compiled by Dan Walsh (dwalsh at redhat dot com)
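A brief usage sketch for the new command (the confirmation prompt wording is assumed, not verbatim):

```
# Interactive: prompts before wiping pods, containers, images and volumes.
sudo podman system reset

# Non-interactive, e.g. for scripted test environments.
sudo podman system reset --force
```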
diff --git a/docs/source/markdown/podman-system.1.md b/docs/source/markdown/podman-system.1.md
index bbd541066..1af97290d 100644
--- a/docs/source/markdown/podman-system.1.md
+++ b/docs/source/markdown/podman-system.1.md
@@ -15,9 +15,10 @@ The system command allows you to manage the podman systems
| ------- | --------------------------------------------------- | ---------------------------------------------------------------------------- |
| df | [podman-system-df(1)](podman-system-df.1.md) | Show podman disk usage. |
| info | [podman-system-info(1)](podman-info.1.md) | Displays Podman related system information. |
-| prune | [podman-system-prune(1)](podman-system-prune.1.md) | Remove all unused container, image and volume data |
-| renumber | [podman-system-renumber(1)](podman-system-renumber.1.md)| Migrate lock numbers to handle a change in maximum number of locks. |
| migrate | [podman-system-migrate(1)](podman-system-migrate.1.md)| Migrate existing containers to a new podman version. |
+| prune | [podman-system-prune(1)](podman-system-prune.1.md) | Remove all unused container, image and volume data. |
+| renumber | [podman-system-renumber(1)](podman-system-renumber.1.md)| Migrate lock numbers to handle a change in maximum number of locks. |
+| reset | [podman-system-reset(1)](podman-system-reset.1.md) | Reset storage back to initial state. |
## SEE ALSO
podman(1)
diff --git a/docs/source/markdown/podman-version.1.md b/docs/source/markdown/podman-version.1.md
index 4499f6338..de22c4800 100644
--- a/docs/source/markdown/podman-version.1.md
+++ b/docs/source/markdown/podman-version.1.md
@@ -34,8 +34,8 @@ OS/Arch: linux/amd64
Filtering out only the version:
```
-$ podman version --format '{{.Version}}'
-0.11.2
+$ podman version --format '{{.Client.Version}}'
+1.6.3
```
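With the client/server split introduced above, server-side fields should be reachable the same way; a sketch, assuming the `Server` field from the Go struct carries into the template:

```
$ podman version --format '{{.Server.Version}}'
1.6.3
```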
## SEE ALSO
diff --git a/docs/source/markdown/podman.1.md b/docs/source/markdown/podman.1.md
index f6fa1a457..c62f54fbb 100644
--- a/docs/source/markdown/podman.1.md
+++ b/docs/source/markdown/podman.1.md
@@ -21,10 +21,6 @@ created by the other.
## GLOBAL OPTIONS
-**--help**, **-h**
-
-Print usage statement
-
**--cgroup-manager**=*manager*
CGroup manager to use for container cgroups. Supported values are cgroupfs or systemd. Default is systemd unless overridden in the libpod.conf file.
@@ -32,6 +28,17 @@ CGroup manager to use for container cgroups. Supported values are cgroupfs or sy
Note: Setting this flag can cause certain commands to break when called on containers previously created by the other CGroup manager type.
Note: CGroup manager is not supported in rootless mode when using CGroups Version V1.
+**--cni-config-dir**
+Path of the configuration directory for CNI networks. (Default: `/etc/cni/net.d`)
+
+**--config**
+Path of a libpod config file detailing container server configuration options
+
+Default libpod config file is /usr/share/containers/libpod.conf. Override file is in /etc/containers/libpod.conf. In rootless mode the config file will be read from $HOME/.config/containers/libpod.conf.
+
+**--conmon**
+Path of the conmon binary (Default path is configured in `libpod.conf`)
+
**--cpu-profile**=*path*
Path to where the cpu performance results should be written
@@ -40,6 +47,10 @@ Path to where the cpu performance results should be written
Backend to use for storing events. Allowed values are **file**, **journald**, and **none**.
+**--help**, **-h**
+
+Print usage statement
+
**--hooks-dir**=*path*
Each `*.json` file in the path configures a hook for Podman containers. For more details on the syntax of the JSON files and the semantics of hook injection, see `oci-hooks(5)`. Podman and libpod currently support both the 1.0.0 and 0.1.0 hook schemas, although the 0.1.0 schema is deprecated.
@@ -58,30 +69,30 @@ Podman and libpod currently support an additional `precreate` state which is cal
**--log-level**=*level*
-Log messages above specified level: debug, info, warn, error (default), fatal or panic
+Log messages above specified level: debug, info, warn, error, fatal or panic (default: "error")
**--namespace**=*namespace*
Set libpod namespace. Namespaces are used to separate groups of containers and pods in libpod's state.
When namespace is set, created containers and pods will join the given namespace, and only containers and pods in the given namespace will be visible to Podman.
+**--network-cmd-path**=*path*
+Path to the command binary to use for setting up a network. It is currently only used for setting up a slirp4netns network. If set to "", the binary is looked up using the $PATH environment variable.
+
**--root=***value*
Storage root dir in which data, including images, is stored (default: "/var/lib/containers/storage" for UID 0, "$HOME/.local/share/containers/storage" for other users).
-Default root dir is configured in /etc/containers/storage.conf.
+Default root dir is configured in `/etc/containers/storage.conf`.
**--runroot**=*value*
Storage state directory where all state information is stored (default: "/var/run/containers/storage" for UID 0, "/var/run/user/$UID/run" for other users).
-Default state dir is configured in /etc/containers/storage.conf.
+Default state dir is configured in `/etc/containers/storage.conf`.
**--runtime**=*value*
Name of the OCI runtime as specified in libpod.conf or absolute path to the OCI compatible binary used to run containers.
-**--network-cmd-path**=*path*
-Path to the command binary to use for setting up a network. It is currently only used for setting up a slirp4netns network. If "" is used then the binary is looked up using the $PATH environment variable.
-
**--storage-driver**=*value*
Storage driver. The default storage driver for UID 0 is configured in /etc/containers/storage.conf (`$HOME/.config/containers/storage.conf` in rootless mode), and is *vfs* for non-root users when *fuse-overlayfs* is not available. The `STORAGE_DRIVER` environment variable overrides the default. The --storage-driver specified driver overrides all.
@@ -95,10 +106,16 @@ Storage driver option, Default storage driver options are configured in /etc/con
**--syslog**
-output logging information to syslog as well as the console
+Output logging information to syslog as well as the console.
On remote clients, logging is directed to the file ~/.config/containers/podman.log
+**--tmpdir**
+
+Path to the tmp directory for libpod runtime content.
+
+NOTE: --tmpdir is not used for the temporary storage of downloaded images. Use the environment variable `TMPDIR` to change the temporary storage location of downloaded container images. Podman defaults to `/var/tmp`.
+
**--version**, **-v**
Print the version
diff --git a/docs/source/pod.rst b/docs/source/pod.rst
index 391686ce5..2df377762 100644
--- a/docs/source/pod.rst
+++ b/docs/source/pod.rst
@@ -11,13 +11,13 @@ Pod
:doc:`pause <markdown/podman-pause.1>` Pause one or more pods
-:doc:`prune <markdown/podman-pod-prune.1>` Remove all stopped pods
+:doc:`prune <markdown/podman-pod-prune.1>` Remove all stopped pods and their containers
:doc:`ps <markdown/podman-pod-ps.1>` List pods
:doc:`restart <markdown/podman-pod-restart.1>` Restart one or more pods
-:doc:`rm <markdown/podman-pod-rm.1>` Remove one or more pods
+:doc:`rm <markdown/podman-pod-rm.1>` Remove one or more stopped pods and containers
:doc:`start <markdown/podman-pod-start.1>` Start one or more pods
diff --git a/go.mod b/go.mod
index 3c0b6359b..82be38291 100644
--- a/go.mod
+++ b/go.mod
@@ -10,11 +10,11 @@ require (
github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd // indirect
github.com/containernetworking/cni v0.7.2-0.20190904153231-83439463f784
github.com/containernetworking/plugins v0.8.2
- github.com/containers/buildah v1.11.5
+ github.com/containers/buildah v1.11.6
github.com/containers/conmon v2.0.2+incompatible // indirect
github.com/containers/image/v5 v5.0.0
github.com/containers/psgo v1.3.2
- github.com/containers/storage v1.13.5
+ github.com/containers/storage v1.15.2
github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f
github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f // indirect
github.com/cri-o/ocicni v0.1.1-0.20190920040751-deac903fd99b
@@ -44,13 +44,13 @@ require (
github.com/onsi/gomega v1.7.1
github.com/opencontainers/go-digest v1.0.0-rc1
github.com/opencontainers/image-spec v1.0.2-0.20190823105129-775207bd45b6
- github.com/opencontainers/runc v1.0.0-rc8.0.20190827142921-dd075602f158
+ github.com/opencontainers/runc v1.0.0-rc9
github.com/opencontainers/runtime-spec v0.1.2-0.20190618234442-a950415649c7
github.com/opencontainers/runtime-tools v0.9.0
github.com/opencontainers/selinux v1.3.0
github.com/opentracing/opentracing-go v1.1.0
github.com/pkg/errors v0.8.1
- github.com/pkg/profile v1.3.0
+ github.com/pkg/profile v1.4.0
github.com/pmezard/go-difflib v1.0.0
github.com/seccomp/containers-golang v0.0.0-20190312124753-8ca8945ccf5f
github.com/sirupsen/logrus v1.4.2
@@ -71,7 +71,7 @@ require (
google.golang.org/appengine v1.6.1 // indirect
google.golang.org/genproto v0.0.0-20190620144150-6af8c5fc6601 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
- gopkg.in/yaml.v2 v2.2.5
+ gopkg.in/yaml.v2 v2.2.7
k8s.io/api v0.0.0-20190813020757-36bff7324fb7
k8s.io/apimachinery v0.0.0-20190809020650-423f5d784010
k8s.io/client-go v0.0.0-20190620085101-78d2af792bab
diff --git a/go.sum b/go.sum
index 88e1846d7..b166b9976 100644
--- a/go.sum
+++ b/go.sum
@@ -21,6 +21,8 @@ github.com/Microsoft/hcsshim v0.8.6 h1:ZfF0+zZeYdzMIVMZHKtDKJvLHj76XCuVae/jNkjj0
github.com/Microsoft/hcsshim v0.8.6/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg=
github.com/Microsoft/hcsshim v0.8.7-0.20191101173118-65519b62243c h1:YMP6olTU903X3gxQJckdmiP8/zkSMq4kN3uipsU9XjU=
github.com/Microsoft/hcsshim v0.8.7-0.20191101173118-65519b62243c/go.mod h1:7xhjOwRV2+0HXGmM0jxaEu+ZiXJFoVZOTfL/dmqbrD8=
+github.com/Microsoft/hcsshim v0.8.7 h1:ptnOoufxGSzauVTsdE+wMYnCWA301PdoN4xg5oRdZpg=
+github.com/Microsoft/hcsshim v0.8.7/go.mod h1:OHd7sQqRFrYd3RmSgbgji+ctCwkbq2wbEYNSzOYtcBQ=
github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
@@ -76,6 +78,8 @@ github.com/containers/buildah v1.11.5-0.20191031204705-20e92ffe0982 h1:5WUe09k2s
github.com/containers/buildah v1.11.5-0.20191031204705-20e92ffe0982/go.mod h1:eGWB4tLoo0hIBuytQpvgUC0hk2mvl2ofaYBeDsU/qoc=
github.com/containers/buildah v1.11.5 h1:bVpkaVlvA7G+1mBDAcX6yf7jNZJ/ZrrAHDt4WCx2i8E=
github.com/containers/buildah v1.11.5/go.mod h1:bfNPqLO8GnI0qMPmI6MHSpQNK+a3TH9syYsRg+iqhRw=
+github.com/containers/buildah v1.11.6 h1:PhlF++LAezRtOKHfKhBlo8DLvpMQIvU/K2VfAhknadE=
+github.com/containers/buildah v1.11.6/go.mod h1:02+o3ZTICaPyP0QcQFoQd07obLMdAecSnFN2kDhcqNo=
github.com/containers/conmon v2.0.2+incompatible h1:h2HCdd/EBpwFn7RT82Y2GyXnVUHWxk1Jm4cESSZG4P8=
github.com/containers/conmon v2.0.2+incompatible/go.mod h1:hgwZ2mtuDrppv78a/cOBNiCm6O0UMWGx1mu7P00nu5I=
github.com/containers/image/v5 v5.0.0 h1:arnXgbt1ucsC/ndtSpiQY87rA0UjhF+/xQnPzqdBDn4=
@@ -88,6 +92,12 @@ github.com/containers/storage v1.13.4 h1:j0bBaJDKbUHtAW1MXPFnwXJtqcH+foWeuXK1YaB
github.com/containers/storage v1.13.4/go.mod h1:6D8nK2sU9V7nEmAraINRs88ZEscM5C5DK+8Npp27GeA=
github.com/containers/storage v1.13.5 h1:/SUzGeOP2HDijpF7Yur21Ch6WTZC1BNeZF917CWcp5c=
github.com/containers/storage v1.13.5/go.mod h1:HELz8Sn+UVbPaUZMI8RvIG9doD4y4z6Gtg4k7xdd2ZY=
+github.com/containers/storage v1.14.0 h1:LbX6WZaDmkXt4DT4xWIg3YXAWd6oA4K9Fi6/KG1xt84=
+github.com/containers/storage v1.14.0/go.mod h1:qGPsti/qC1xxX+xcpHfiTMT+8ThVE2Jf83wFHHqkDAY=
+github.com/containers/storage v1.15.0 h1:QNW7jJ94ccGcAbFIOSMHUAsUxvHceb71ecLye9EDrkk=
+github.com/containers/storage v1.15.0/go.mod h1:qGPsti/qC1xxX+xcpHfiTMT+8ThVE2Jf83wFHHqkDAY=
+github.com/containers/storage v1.15.2 h1:hLgafU4tuyQk/smMkXZfHTS8FtAQsqQvfWCp4bsgjuw=
+github.com/containers/storage v1.15.2/go.mod h1:v0lq/3f+cXH3Y/HiDaFYRR0zilwDve7I4W7U5xQxvF8=
github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk=
github.com/coreos/go-iptables v0.4.2 h1:KH0EwId05JwWIfb96gWvkiT2cbuOu8ygqUaB+yPAwIg=
@@ -265,6 +275,10 @@ github.com/klauspost/compress v1.7.2 h1:liMOoeIvFpr9kEvalrZ7VVBA4wGf7zfOgwBjzz/5
github.com/klauspost/compress v1.7.2/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
github.com/klauspost/compress v1.8.1 h1:oygt2ychZFHOB6M9gUgajzgKrwRgHbGC77NwA4COVgI=
github.com/klauspost/compress v1.8.1/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
+github.com/klauspost/compress v1.9.2 h1:LfVyl+ZlLlLDeQ/d2AqfGIIH4qEDu0Ed2S5GyhCWIWY=
+github.com/klauspost/compress v1.9.2/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
+github.com/klauspost/compress v1.9.4 h1:xhvAeUPQ2drNUhKtrGdTGNvV9nNafHMUkRyLkzxJoB4=
+github.com/klauspost/compress v1.9.4/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
github.com/klauspost/cpuid v1.2.1 h1:vJi+O/nMdFt0vqm8NZBI6wzALWdA2X+egi0ogNyrC/w=
github.com/klauspost/cpuid v1.2.1/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
github.com/klauspost/pgzip v1.2.1 h1:oIPZROsWuPHpOdMVWLuJZXwgjhrW8r1yEX8UqMyeNHM=
@@ -345,6 +359,8 @@ github.com/opencontainers/runc v1.0.0-rc8 h1:dDCFes8Hj1r/i5qnypONo5jdOme/8HWZC/a
github.com/opencontainers/runc v1.0.0-rc8/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
github.com/opencontainers/runc v1.0.0-rc8.0.20190827142921-dd075602f158 h1:/A6bAdnSZoTQmKml3MdHAnSEPnBAQeigNBl4sxnfaaQ=
github.com/opencontainers/runc v1.0.0-rc8.0.20190827142921-dd075602f158/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
+github.com/opencontainers/runc v1.0.0-rc9 h1:/k06BMULKF5hidyoZymkoDCzdJzltZpz/UU4LguQVtc=
+github.com/opencontainers/runc v1.0.0-rc9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
github.com/opencontainers/runtime-spec v0.1.2-0.20190618234442-a950415649c7 h1:Dliu5QO+4JYWu/yMshaMU7G3JN2POGpwjJN7gjy10Go=
github.com/opencontainers/runtime-spec v0.1.2-0.20190618234442-a950415649c7/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
@@ -371,6 +387,8 @@ github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/profile v1.3.0 h1:OQIvuDgm00gWVWGTf4m4mCt6W1/0YqU7Ntg0mySWgaI=
github.com/pkg/profile v1.3.0/go.mod h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6JUPA=
+github.com/pkg/profile v1.4.0 h1:uCmaf4vVbWAOZz36k1hrQD7ijGRzLwaME8Am/7a4jZI=
+github.com/pkg/profile v1.4.0/go.mod h1:NWz/XGvpEW1FyYQ7fCx4dqYBLlfTcE+A9FLAkNKqjFE=
github.com/pmezard/go-difflib v0.0.0-20151028094244-d8ed2627bdf0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
@@ -601,6 +619,8 @@ gopkg.in/yaml.v2 v2.2.4 h1:/eiJrUcujPVeJ3xlSWaiNi3uSVmDGBK1pDHUHAnao1I=
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.5 h1:ymVxjfMaHvXD8RqPRmzHHsB3VvucivSkIAvJFDI5O3c=
gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.7 h1:VUgggvou5XRW9mHwD/yXxIYSMtY0zoKQf/v226p2nyo=
+gopkg.in/yaml.v2 v2.2.7/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gotest.tools v0.0.0-20190624233834-05ebafbffc79/go.mod h1:R//lfYlUuTOTfblYI3lGoAAAebUdzjvbmQsuB7Ykd90=
gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo=
gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw=
diff --git a/hack/get_release_info.sh b/hack/get_release_info.sh
index b75751170..c2be6a270 100755
--- a/hack/get_release_info.sh
+++ b/hack/get_release_info.sh
@@ -4,9 +4,10 @@
# a script allows uniform behavior across multiple environments and
# distributions. The script expects a single argument, as reflected below.
-set -e
+set -euo pipefail
-cd "${GOSRC:-$(dirname $0)/../}"
+DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
+cd "${GOSRC:-${DIR}/../}"
valid_args() {
REGEX='^\s+[[:upper:]]+\*[)]'
diff --git a/install.md b/install.md
index 39b639176..218994587 100644
--- a/install.md
+++ b/install.md
@@ -68,7 +68,7 @@ The latest builds are available in a PPA. Take note of the [Build and Run Depend
```bash
sudo apt-get update -qq
-sudo apt-get install -qq -y software-properties-common uidmap
+sudo apt-get install -qq -y software-properties-common uidmap slirp4netns
sudo add-apt-repository -y ppa:projectatomic/ppa
sudo apt-get update -qq
sudo apt-get -qq -y install podman
@@ -95,7 +95,9 @@ system](https://bodhi.fedoraproject.org/updates/?packages=podman).
**Required**
-Fedora, CentOS, RHEL, and related distributions:
+On Fedora, CentOS, RHEL, and related distributions, you should try to run
+`make package-install`, which will install dependencies, build the source,
+produce RPMs for the current platform, and install them.
```bash
sudo yum install -y \
diff --git a/libpod/boltdb_state_linux.go b/libpod/boltdb_state_linux.go
index 09a9be606..6ccda71bd 100644
--- a/libpod/boltdb_state_linux.go
+++ b/libpod/boltdb_state_linux.go
@@ -3,6 +3,8 @@
package libpod
import (
+ "github.com/containers/libpod/libpod/define"
+ "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -25,8 +27,12 @@ func replaceNetNS(netNSPath string, ctr *Container, newState *ContainerState) er
if err == nil {
newState.NetNS = ns
} else {
+ if ctr.ensureState(define.ContainerStateRunning, define.ContainerStatePaused) {
+ return errors.Wrapf(err, "error joning network namespace of container %s", ctr.ID())
+ }
+
logrus.Errorf("error joining network namespace for container %s: %v", ctr.ID(), err)
- ctr.valid = false
+ ctr.state.NetNS = nil
}
}
} else {
diff --git a/libpod/config/config.go b/libpod/config/config.go
index 5b4b57f3a..f1fa70fbc 100644
--- a/libpod/config/config.go
+++ b/libpod/config/config.go
@@ -469,6 +469,9 @@ func NewConfig(userConfigPath string) (*Config, error) {
if defaultConfig, err := defaultConfigFromMemory(); err != nil {
return nil, errors.Wrapf(err, "error generating default config from memory")
} else {
+ // Check if we need to switch to cgroupfs and logger=file on rootless.
+ defaultConfig.checkCgroupsAndLogger()
+
if err := config.mergeConfig(defaultConfig); err != nil {
return nil, errors.Wrapf(err, "error merging default config from memory")
}
@@ -487,9 +490,6 @@ func NewConfig(userConfigPath string) (*Config, error) {
return nil, errors.Wrapf(define.ErrInvalidArg, "volume path must be an absolute path - instead got %q", config.VolumePath)
}
- // Check if we need to switch to cgroupfs on rootless.
- config.checkCgroupsAndAdjustConfig()
-
return config, nil
}
@@ -524,11 +524,13 @@ func systemConfigs() ([]string, error) {
return configs, nil
}
-// checkCgroupsAndAdjustConfig checks if we're running rootless with the systemd
+// checkCgroupsAndLogger checks if we're running rootless with the systemd
// cgroup manager. In case the user session isn't available, we're switching the
-// cgroup manager to cgroupfs. Note, this only applies to rootless.
-func (c *Config) checkCgroupsAndAdjustConfig() {
- if !rootless.IsRootless() || c.CgroupManager != define.SystemdCgroupsManager {
+// cgroup manager to cgroupfs and the events logger backend to 'file'.
+// Note, this only applies to rootless.
+func (c *Config) checkCgroupsAndLogger() {
+ if !rootless.IsRootless() || (c.CgroupManager !=
+ define.SystemdCgroupsManager && c.EventsLogger == "file") {
return
}
@@ -543,7 +545,8 @@ func (c *Config) checkCgroupsAndAdjustConfig() {
logrus.Warningf("The cgroups manager is set to systemd but there is no systemd user session available")
logrus.Warningf("For using systemd, you may need to login using an user session")
logrus.Warningf("Alternatively, you can enable lingering with: `loginctl enable-linger %d` (possibly as root)", rootless.GetRootlessUID())
- logrus.Warningf("Falling back to --cgroup-manager=cgroupfs")
+ logrus.Warningf("Falling back to --cgroup-manager=cgroupfs and --events-backend=file")
c.CgroupManager = define.CgroupfsCgroupsManager
+ c.EventsLogger = "file"
}
}
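
The rootless fallback above is easiest to see in isolation. A minimal standalone sketch, assuming a hypothetical `Config` with plain string fields and a precomputed `hasUserSession` flag (the real code probes the systemd user session and logs warnings via logrus):

```go
package main

import "fmt"

// Config is a hypothetical stand-in for libpod's runtime config.
type Config struct {
	CgroupManager string
	EventsLogger  string
}

// checkCgroupsAndLogger mirrors the early-return shape above: do
// nothing unless rootless, and fall back to cgroupfs and the file
// events backend when no systemd user session is available.
func (c *Config) checkCgroupsAndLogger(rootless, hasUserSession bool) {
	if !rootless || (c.CgroupManager != "systemd" && c.EventsLogger == "file") {
		return
	}
	if hasUserSession {
		return
	}
	c.CgroupManager = "cgroupfs"
	c.EventsLogger = "file"
}

func main() {
	cfg := &Config{CgroupManager: "systemd", EventsLogger: "journald"}
	cfg.checkCgroupsAndLogger(true, false)
	fmt.Println(cfg.CgroupManager, cfg.EventsLogger) // cgroupfs file
}
```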
diff --git a/libpod/container.go b/libpod/container.go
index 4f7fc067e..dcec3ee50 100644
--- a/libpod/container.go
+++ b/libpod/container.go
@@ -6,6 +6,7 @@ import (
"net"
"os"
"path/filepath"
+ "strings"
"time"
"github.com/containernetworking/cni/pkg/types"
@@ -1072,7 +1073,14 @@ func (c *Container) CGroupPath() (string, error) {
case define.SystemdCgroupsManager:
if rootless.IsRootless() {
uid := rootless.GetRootlessUID()
- return filepath.Join(c.config.CgroupParent, fmt.Sprintf("user-%d.slice/user@%d.service/user.slice", uid, uid), createUnitName("libpod", c.ID())), nil
+ parts := strings.SplitN(c.config.CgroupParent, "/", 2)
+
+ dir := ""
+ if len(parts) > 1 {
+ dir = parts[1]
+ }
+
+ return filepath.Join(parts[0], fmt.Sprintf("user-%d.slice/user@%d.service/user.slice/%s", uid, uid, dir), createUnitName("libpod", c.ID())), nil
}
return filepath.Join(c.config.CgroupParent, createUnitName("libpod", c.ID())), nil
default:
@@ -1146,7 +1154,7 @@ func (c *Container) NetworkDisabled() (bool, error) {
if err != nil {
return false, err
}
- return networkDisabled(container)
+ return container.NetworkDisabled()
}
return networkDisabled(c)
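
To see what the new `CGroupPath` arithmetic produces, here is a small sketch under the assumption of a nested cgroup parent; `uid` and `unitName` stand in for `rootless.GetRootlessUID()` and `createUnitName`:

```go
package main

import (
	"fmt"
	"path/filepath"
	"strings"
)

// rootlessCgroupPath mirrors the change above: split the configured
// cgroup parent into the top-level slice and any nested remainder,
// then splice the user session path in between.
func rootlessCgroupPath(cgroupParent, unitName string, uid int) string {
	parts := strings.SplitN(cgroupParent, "/", 2)
	dir := ""
	if len(parts) > 1 {
		dir = parts[1]
	}
	session := fmt.Sprintf("user-%d.slice/user@%d.service/user.slice/%s", uid, uid, dir)
	return filepath.Join(parts[0], session, unitName)
}

func main() {
	fmt.Println(rootlessCgroupPath("user.slice/mypod.slice", "libpod-abc.scope", 1000))
}
```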
diff --git a/libpod/container_api.go b/libpod/container_api.go
index b8cfe02f6..153a1d628 100644
--- a/libpod/container_api.go
+++ b/libpod/container_api.go
@@ -404,6 +404,11 @@ func (c *Container) Mount() (string, error) {
return "", err
}
}
+
+ if c.state.State == define.ContainerStateRemoving {
+ return "", errors.Wrapf(define.ErrCtrStateInvalid, "cannot mount container %s as it is being removed", c.ID())
+ }
+
defer c.newContainerEvent(events.Mount)
return c.mount()
}
@@ -488,7 +493,12 @@ func (c *Container) Export(path string) error {
return err
}
}
- defer c.newContainerEvent(events.Export)
+
+ if c.state.State == define.ContainerStateRemoving {
+ return errors.Wrapf(define.ErrCtrStateInvalid, "cannot mount container %s as it is being removed", c.ID())
+ }
+
+ defer c.newContainerEvent(events.Export)
return c.export(path)
}
@@ -674,6 +684,10 @@ func (c *Container) Refresh(ctx context.Context) error {
}
}
+ if c.state.State == define.ContainerStateRemoving {
+ return errors.Wrapf(define.ErrCtrStateInvalid, "cannot refresh containers that are being removed")
+ }
+
wasCreated := false
if c.state.State == define.ContainerStateCreated {
wasCreated = true
@@ -819,7 +833,6 @@ func (c *Container) Checkpoint(ctx context.Context, options ContainerCheckpointO
return err
}
}
- defer c.newContainerEvent(events.Checkpoint)
return c.checkpoint(ctx, options)
}
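
The guards added to `Mount`, `Export`, and `Refresh` all follow one pattern: refuse the operation when the container is mid-removal. A minimal sketch of that pattern, with hypothetical state constants:

```go
package main

import (
	"errors"
	"fmt"
)

type ContainerStatus int

const (
	StateRunning ContainerStatus = iota
	StateRemoving
)

var errCtrStateInvalid = errors.New("container state improper")

// guardNotRemoving mirrors the checks above: reject API operations
// on a container that is in the process of being removed.
func guardNotRemoving(state ContainerStatus, id, op string) error {
	if state == StateRemoving {
		return fmt.Errorf("cannot %s container %s as it is being removed: %w", op, id, errCtrStateInvalid)
	}
	return nil
}

func main() {
	fmt.Println(guardNotRemoving(StateRemoving, "abc123", "mount"))
}
```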
diff --git a/libpod/container_commit.go b/libpod/container_commit.go
index 42f298a81..a0ba57f4f 100644
--- a/libpod/container_commit.go
+++ b/libpod/container_commit.go
@@ -3,7 +3,6 @@ package libpod
import (
"context"
"fmt"
- "os"
"strings"
"github.com/containers/buildah"
@@ -12,6 +11,7 @@ import (
"github.com/containers/libpod/libpod/define"
"github.com/containers/libpod/libpod/events"
"github.com/containers/libpod/libpod/image"
+ libpodutil "github.com/containers/libpod/pkg/util"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -32,10 +32,6 @@ type ContainerCommitOptions struct {
// Commit commits the changes between a container and its image, creating a new
// image
func (c *Container) Commit(ctx context.Context, destImage string, options ContainerCommitOptions) (*image.Image, error) {
- var (
- isEnvCleared, isLabelCleared, isExposeCleared, isVolumeCleared bool
- )
-
if c.config.Rootfs != "" {
return nil, errors.Errorf("cannot commit a container that uses an exploded rootfs")
}
@@ -51,7 +47,7 @@ func (c *Container) Commit(ctx context.Context, destImage string, options Contai
if c.state.State == define.ContainerStateRunning && options.Pause {
if err := c.pause(); err != nil {
- return nil, errors.Wrapf(err, "error pausing container %q", c.ID())
+ return nil, errors.Wrapf(err, "error pausing container %q to commit", c.ID())
}
defer func() {
if err := c.unpause(); err != nil {
@@ -103,7 +99,7 @@ func (c *Container) Commit(ctx context.Context, destImage string, options Contai
}
// Expose ports
for _, p := range c.config.PortMappings {
- importBuilder.SetPort(fmt.Sprintf("%d", p.ContainerPort))
+ importBuilder.SetPort(fmt.Sprintf("%d/%s", p.ContainerPort, p.Protocol))
}
// Labels
for k, v := range c.Labels() {
@@ -111,7 +107,9 @@ func (c *Container) Commit(ctx context.Context, destImage string, options Contai
}
// No stop signal
// User
- importBuilder.SetUser(c.User())
+ if c.config.User != "" {
+ importBuilder.SetUser(c.config.User)
+ }
// Volumes
if options.IncludeVolumes {
for _, v := range c.config.UserVolumes {
@@ -119,107 +117,76 @@ func (c *Container) Commit(ctx context.Context, destImage string, options Contai
importBuilder.AddVolume(v)
}
}
- }
- // Workdir
- importBuilder.SetWorkDir(c.Spec().Process.Cwd)
-
- genCmd := func(cmd string) []string {
- trim := func(cmd []string) []string {
- if len(cmd) == 0 {
- return cmd
+ } else {
+ // By default, only include anonymous (container-specific)
+ // named volumes that the user explicitly added.
+ for _, v := range c.config.NamedVolumes {
+ include := false
+ for _, userVol := range c.config.UserVolumes {
+ if userVol == v.Dest {
+ include = true
+ break
+ }
}
-
- retCmd := []string{}
- for _, c := range cmd {
- if len(c) >= 2 {
- if c[0] == '"' && c[len(c)-1] == '"' {
- retCmd = append(retCmd, c[1:len(c)-1])
- continue
- }
+ if include {
+ vol, err := c.runtime.GetVolume(v.Name)
+ if err != nil {
+ return nil, errors.Wrapf(err, "volume %s used in container %s has been removed", v.Name, c.ID())
+ }
+ if vol.IsCtrSpecific() {
+ importBuilder.AddVolume(v.Dest)
}
- retCmd = append(retCmd, c)
}
- return retCmd
}
- if strings.HasPrefix(cmd, "[") {
- cmd = strings.TrimPrefix(cmd, "[")
- cmd = strings.TrimSuffix(cmd, "]")
- return trim(strings.Split(cmd, ","))
- }
- return []string{"/bin/sh", "-c", cmd}
}
- // Process user changes
- for _, change := range options.Changes {
- splitChange := strings.SplitN(change, "=", 2)
- if len(splitChange) != 2 {
- splitChange = strings.SplitN(change, " ", 2)
- if len(splitChange) < 2 {
- return nil, errors.Errorf("invalid change %s format", change)
- }
- }
+ // Workdir
+ importBuilder.SetWorkDir(c.config.Spec.Process.Cwd)
- switch strings.ToUpper(splitChange[0]) {
- case "CMD":
- importBuilder.SetCmd(genCmd(splitChange[1]))
- case "ENTRYPOINT":
- importBuilder.SetEntrypoint(genCmd(splitChange[1]))
- case "ENV":
- change := strings.Split(splitChange[1], " ")
- name := change[0]
- val := ""
- if len(change) < 2 {
- change = strings.Split(change[0], "=")
- }
- if len(change) < 2 {
- var ok bool
- val, ok = os.LookupEnv(name)
- if !ok {
- return nil, errors.Errorf("invalid env variable %q: not defined in your environment", name)
- }
- } else {
- name = change[0]
- val = strings.Join(change[1:], " ")
- }
- if !isEnvCleared { // Multiple values are valid, only clear once.
- importBuilder.ClearEnv()
- isEnvCleared = true
- }
- importBuilder.SetEnv(name, val)
- case "EXPOSE":
- if !isExposeCleared { // Multiple values are valid, only clear once
- importBuilder.ClearPorts()
- isExposeCleared = true
- }
- importBuilder.SetPort(splitChange[1])
- case "LABEL":
- change := strings.Split(splitChange[1], " ")
- if len(change) < 2 {
- change = strings.Split(change[0], "=")
- }
- if len(change) < 2 {
- return nil, errors.Errorf("invalid label %s format, requires to NAME=VAL", splitChange[1])
- }
- if !isLabelCleared { // multiple values are valid, only clear once
- importBuilder.ClearLabels()
- isLabelCleared = true
- }
- importBuilder.SetLabel(change[0], strings.Join(change[1:], " "))
- case "ONBUILD":
- importBuilder.SetOnBuild(splitChange[1])
- case "STOPSIGNAL":
- // No Set StopSignal
- case "USER":
- importBuilder.SetUser(splitChange[1])
- case "VOLUME":
- if !isVolumeCleared { // multiple values are valid, only clear once
- importBuilder.ClearVolumes()
- isVolumeCleared = true
- }
- importBuilder.AddVolume(splitChange[1])
- case "WORKDIR":
- importBuilder.SetWorkDir(splitChange[1])
+ // Process user changes
+ newImageConfig, err := libpodutil.GetImageConfig(options.Changes)
+ if err != nil {
+ return nil, err
+ }
+ if newImageConfig.User != "" {
+ importBuilder.SetUser(newImageConfig.User)
+ }
+ // EXPOSE only appends
+ for port := range newImageConfig.ExposedPorts {
+ importBuilder.SetPort(port)
+ }
+ // ENV only appends
+ for _, env := range newImageConfig.Env {
+ splitEnv := strings.SplitN(env, "=", 2)
+ key := splitEnv[0]
+ value := ""
+ if len(splitEnv) == 2 {
+ value = splitEnv[1]
}
+ importBuilder.SetEnv(key, value)
}
+ if newImageConfig.Entrypoint != nil {
+ importBuilder.SetEntrypoint(newImageConfig.Entrypoint)
+ }
+ if newImageConfig.Cmd != nil {
+ importBuilder.SetCmd(newImageConfig.Cmd)
+ }
+ // VOLUME only appends
+ for vol := range newImageConfig.Volumes {
+ importBuilder.AddVolume(vol)
+ }
+ if newImageConfig.WorkingDir != "" {
+ importBuilder.SetWorkDir(newImageConfig.WorkingDir)
+ }
+ for k, v := range newImageConfig.Labels {
+ importBuilder.SetLabel(k, v)
+ }
+ if newImageConfig.StopSignal != "" {
+ importBuilder.SetStopSignal(newImageConfig.StopSignal)
+ }
+ for _, onbuild := range newImageConfig.OnBuild {
+ importBuilder.SetOnBuild(onbuild)
+ }
+
candidates, _, _, err := util.ResolveName(destImage, "", sc, c.runtime.store)
if err != nil {
return nil, errors.Wrapf(err, "error resolving name %q", destImage)
diff --git a/libpod/container_internal.go b/libpod/container_internal.go
index 028d7601d..1e8a8a580 100644
--- a/libpod/container_internal.go
+++ b/libpod/container_internal.go
@@ -652,6 +652,11 @@ func (c *Container) removeConmonFiles() error {
return errors.Wrapf(err, "error removing container %s ctl file", c.ID())
}
+ winszFile := filepath.Join(c.bundlePath(), "winsz")
+ if err := os.Remove(winszFile); err != nil && !os.IsNotExist(err) {
+ return errors.Wrapf(err, "error removing container %s winsz file", c.ID())
+ }
+
oomFile := filepath.Join(c.bundlePath(), "oom")
if err := os.Remove(oomFile); err != nil && !os.IsNotExist(err) {
return errors.Wrapf(err, "error removing container %s OOM file", c.ID())
@@ -714,7 +719,8 @@ func (c *Container) isStopped() (bool, error) {
if err != nil {
return true, err
}
- return c.state.State != define.ContainerStateRunning && c.state.State != define.ContainerStatePaused, nil
+
+ return !c.ensureState(define.ContainerStateRunning, define.ContainerStatePaused), nil
}
// save container state to the database
@@ -1052,6 +1058,8 @@ func (c *Container) initAndStart(ctx context.Context) (err error) {
// If we are ContainerStateUnknown, throw an error
if c.state.State == define.ContainerStateUnknown {
return errors.Wrapf(define.ErrCtrStateInvalid, "container %s is in an unknown state", c.ID())
+ } else if c.state.State == define.ContainerStateRemoving {
+ return errors.Wrapf(define.ErrCtrStateInvalid, "cannot start container %s as it is being removed", c.ID())
}
// If we are running, do nothing
diff --git a/libpod/container_internal_linux.go b/libpod/container_internal_linux.go
index 26d6771b0..1b0570998 100644
--- a/libpod/container_internal_linux.go
+++ b/libpod/container_internal_linux.go
@@ -21,6 +21,7 @@ import (
"github.com/containernetworking/plugins/pkg/ns"
"github.com/containers/buildah/pkg/secrets"
"github.com/containers/libpod/libpod/define"
+ "github.com/containers/libpod/libpod/events"
"github.com/containers/libpod/pkg/annotations"
"github.com/containers/libpod/pkg/apparmor"
"github.com/containers/libpod/pkg/cgroups"
@@ -676,6 +677,10 @@ func (c *Container) checkpoint(ctx context.Context, options ContainerCheckpointO
return errors.Wrapf(define.ErrCtrStateInvalid, "%q is not running, cannot checkpoint", c.state.State)
}
+ if c.AutoRemove() && options.TargetFile == "" {
+ return errors.Errorf("Cannot checkpoint containers that have been started with '--rm' unless '--export' is used")
+ }
+
if err := c.checkpointRestoreLabelLog("dump.log"); err != nil {
return err
}
@@ -695,6 +700,8 @@ func (c *Container) checkpoint(ctx context.Context, options ContainerCheckpointO
return err
}
+ defer c.newContainerEvent(events.Checkpoint)
+
if options.TargetFile != "" {
if err = c.exportCheckpoint(options.TargetFile, options.IgnoreRootfs); err != nil {
return err
@@ -766,7 +773,7 @@ func (c *Container) restore(ctx context.Context, options ContainerCheckpointOpti
return err
}
- if (c.state.State != define.ContainerStateConfigured) && (c.state.State != define.ContainerStateExited) {
+ if !c.ensureState(define.ContainerStateConfigured, define.ContainerStateExited) {
return errors.Wrapf(define.ErrCtrStateInvalid, "container %s is running or paused, cannot restore", c.ID())
}
@@ -884,7 +891,12 @@ func (c *Container) restore(ctx context.Context, options ContainerCheckpointOpti
// We want to have the same network namespace as before.
if c.config.CreateNetNS {
- if err := g.AddOrReplaceLinuxNamespace(string(spec.NetworkNamespace), c.state.NetNS.Path()); err != nil {
+ netNSPath := ""
+ if !c.config.PostConfigureNetNS {
+ netNSPath = c.state.NetNS.Path()
+ }
+
+ if err := g.AddOrReplaceLinuxNamespace(string(spec.NetworkNamespace), netNSPath); err != nil {
return err
}
}
@@ -1004,9 +1016,24 @@ func (c *Container) makeBindMounts() error {
// We want /etc/resolv.conf and /etc/hosts from the
// other container. Unless we're not creating both of
// them.
- depCtr, err := c.runtime.state.Container(c.config.NetNsCtr)
- if err != nil {
- return errors.Wrapf(err, "error fetching dependency %s of container %s", c.config.NetNsCtr, c.ID())
+ var (
+ depCtr *Container
+ nextCtr string
+ )
+
+ // I don't like infinite loops, but I don't think there's
+ // a serious risk of looping dependencies - too many
+ // protections against that elsewhere.
+ nextCtr = c.config.NetNsCtr
+ for {
+ depCtr, err = c.runtime.state.Container(nextCtr)
+ if err != nil {
+ return errors.Wrapf(err, "error fetching dependency %s of container %s", c.config.NetNsCtr, c.ID())
+ }
+ nextCtr = depCtr.config.NetNsCtr
+ if nextCtr == "" {
+ break
+ }
}
// We need that container's bind mounts
diff --git a/libpod/define/containerstate.go b/libpod/define/containerstate.go
index ab2527b3e..e7d258e21 100644
--- a/libpod/define/containerstate.go
+++ b/libpod/define/containerstate.go
@@ -25,6 +25,9 @@ const (
// ContainerStateExited indicates the the container has stopped and been
// cleaned up
ContainerStateExited ContainerStatus = iota
+ // ContainerStateRemoving indicates the container is in the process of
+ // being removed.
+ ContainerStateRemoving ContainerStatus = iota
)
// ContainerStatus returns a string representation for users
@@ -45,6 +48,8 @@ func (t ContainerStatus) String() string {
return "paused"
case ContainerStateExited:
return "exited"
+ case ContainerStateRemoving:
+ return "removing"
}
return "bad state"
}
@@ -67,6 +72,8 @@ func StringToContainerStatus(status string) (ContainerStatus, error) {
return ContainerStatePaused, nil
case ContainerStateExited.String():
return ContainerStateExited, nil
+ case ContainerStateRemoving.String():
+ return ContainerStateRemoving, nil
default:
return ContainerStateUnknown, errors.Wrapf(ErrInvalidArg, "unknown container state: %s", status)
}
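
A quick round-trip sketch of the new state through `String` and `StringToContainerStatus`; the enum value and the trimmed-down type are assumptions for illustration:

```go
package main

import (
	"errors"
	"fmt"
)

// ContainerStatus models only the new state; the real type
// enumerates every container state.
type ContainerStatus int

const ContainerStateRemoving ContainerStatus = 8 // value is an assumption

func (t ContainerStatus) String() string {
	if t == ContainerStateRemoving {
		return "removing"
	}
	return "bad state"
}

func StringToContainerStatus(status string) (ContainerStatus, error) {
	if status == ContainerStateRemoving.String() {
		return ContainerStateRemoving, nil
	}
	return 0, errors.New("unknown container state: " + status)
}

func main() {
	s, err := StringToContainerStatus("removing")
	fmt.Println(s, err) // removing <nil>
}
```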
diff --git a/libpod/image/image.go b/libpod/image/image.go
index c912ac2ca..129ccd376 100644
--- a/libpod/image/image.go
+++ b/libpod/image/image.go
@@ -74,6 +74,11 @@ type InfoImage struct {
Layers []LayerInfo
}
+// ImageFilter is a function to determine whether an image is included
+// in command output. Images to be output are tested using the function.
+// A true return will include the image, a false return will exclude it.
+type ImageFilter func(*Image) bool //nolint
+
// ErrRepoTagNotFound is the error returned when the image id given doesn't match a rep tag in store
var ErrRepoTagNotFound = stderrors.New("unable to match user input to any specific repotag")
@@ -330,6 +335,21 @@ func (i *Image) Names() []string {
return i.image.Names
}
+// NamesHistory returns a string array of names previously associated with the
+// image, which may be a mixture of tags and digests
+func (i *Image) NamesHistory() []string {
+ if len(i.image.Names) > 0 && len(i.image.NamesHistory) > 0 &&
+ // We compare the latest (time-referenced) tags for equality and skip
+ // it in the history if they match to not display them twice. We have
+ // to compare like this, because `i.image.Names` (latest last) gets
+ // appended on retag, whereas `i.image.NamesHistory` gets prepended
+ // (latest first)
+ i.image.Names[len(i.image.Names)-1] == i.image.NamesHistory[0] {
+ return i.image.NamesHistory[1:]
+ }
+ return i.image.NamesHistory
+}
+
// RepoTags returns a string array of repotags associated with the image
func (i *Image) RepoTags() ([]string, error) {
var repoTags []string
@@ -765,109 +785,65 @@ func (i *Image) History(ctx context.Context) ([]*History, error) {
return nil, err
}
- // Use our layers list to find images that use any of them (or no
- // layer, since every base layer is derived from an empty layer) as its
- // topmost layer.
- interestingLayers := make(map[string]bool)
- var layer *storage.Layer
- if i.TopLayer() != "" {
- if layer, err = i.imageruntime.store.Layer(i.TopLayer()); err != nil {
- return nil, err
- }
+ // Build a mapping from top-layer to image ID.
+ images, err := i.imageruntime.GetImages()
+ if err != nil {
+ return nil, err
}
- interestingLayers[""] = true
- for layer != nil {
- interestingLayers[layer.ID] = true
- if layer.Parent == "" {
- break
+ topLayerMap := make(map[string]string)
+ for _, image := range images {
+ if _, exists := topLayerMap[image.TopLayer()]; !exists {
+ topLayerMap[image.TopLayer()] = image.ID()
}
- layer, err = i.imageruntime.store.Layer(layer.Parent)
+ }
+
+ var allHistory []*History
+ var layer *storage.Layer
+
+ // Check if we have an actual top layer to prevent lookup errors.
+ if i.TopLayer() != "" {
+ layer, err = i.imageruntime.store.Layer(i.TopLayer())
if err != nil {
return nil, err
}
}
- // Get the IDs of the images that share some of our layers. Hopefully
- // this step means that we'll be able to avoid reading the
- // configuration of every single image in local storage later on.
- images, err := i.imageruntime.GetImages()
- if err != nil {
- return nil, errors.Wrapf(err, "error getting images from store")
- }
- interestingImages := make([]*Image, 0, len(images))
- for i := range images {
- if interestingLayers[images[i].TopLayer()] {
- interestingImages = append(interestingImages, images[i])
- }
- }
+ // Iterate in reverse order over the history entries, look up the
+ // corresponding image ID and size, and get the next layer if needed.
+ numHistories := len(oci.History) - 1
+ for x := numHistories; x >= 0; x-- {
+ var size int64
- // Build a list of image IDs that correspond to our history entries.
- historyImages := make([]*Image, len(oci.History))
- if len(oci.History) > 0 {
- // The starting image shares its whole history with itself.
- historyImages[len(historyImages)-1] = i
- for i := range interestingImages {
- image, err := images[i].ociv1Image(ctx)
- if err != nil {
- return nil, errors.Wrapf(err, "error getting image configuration for image %q", images[i].ID())
- }
- // If the candidate has a longer history or no history
- // at all, then it doesn't share the portion of our
- // history that we're interested in matching with other
- // images.
- if len(image.History) == 0 || len(image.History) > len(historyImages) {
- continue
+ id := "<missing>"
+ if x == numHistories {
+ id = i.ID()
+ } else if layer != nil {
+ if !oci.History[x].EmptyLayer {
+ size = layer.UncompressedSize
}
- // If we don't include all of the layers that the
- // candidate image does (i.e., our rootfs didn't look
- // like its rootfs at any point), then it can't be part
- // of our history.
- if len(image.RootFS.DiffIDs) > len(oci.RootFS.DiffIDs) {
- continue
- }
- candidateLayersAreUsed := true
- for i := range image.RootFS.DiffIDs {
- if image.RootFS.DiffIDs[i] != oci.RootFS.DiffIDs[i] {
- candidateLayersAreUsed = false
- break
- }
- }
- if !candidateLayersAreUsed {
- continue
- }
- // If the candidate's entire history is an initial
- // portion of our history, then we're based on it,
- // either directly or indirectly.
- sharedHistory := historiesMatch(oci.History, image.History)
- if sharedHistory == len(image.History) {
- historyImages[sharedHistory-1] = images[i]
+ if imageID, exists := topLayerMap[layer.ID]; exists {
+ id = imageID
+ // Delete the entry to avoid reusing it for following history items.
+ delete(topLayerMap, layer.ID)
}
}
- }
- var (
- size int64
- sizeCount = 1
- allHistory []*History
- )
-
- for i := len(oci.History) - 1; i >= 0; i-- {
- imageID := "<missing>"
- if historyImages[i] != nil {
- imageID = historyImages[i].ID()
- }
- if !oci.History[i].EmptyLayer {
- size = img.LayerInfos()[len(img.LayerInfos())-sizeCount].Size
- sizeCount++
- }
allHistory = append(allHistory, &History{
- ID: imageID,
- Created: oci.History[i].Created,
- CreatedBy: oci.History[i].CreatedBy,
+ ID: id,
+ Created: oci.History[x].Created,
+ CreatedBy: oci.History[x].CreatedBy,
Size: size,
- Comment: oci.History[i].Comment,
+ Comment: oci.History[x].Comment,
})
+
+ if layer != nil && layer.Parent != "" && !oci.History[x].EmptyLayer {
+ layer, err = i.imageruntime.store.Layer(layer.Parent)
+ if err != nil {
+ return nil, err
+ }
+ }
}
+
return allHistory, nil
}
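
The rewritten history walk is easier to follow in a reduced form: attribute each non-empty history entry (newest first) to whichever image owns the current layer as its top layer, then step to the parent. A sketch with hypothetical `layer` and `historyEntry` types:

```go
package main

import "fmt"

// layer and historyEntry are hypothetical stand-ins for storage types.
type layer struct {
	id, parent string
}

type historyEntry struct{ emptyLayer bool }

// walkHistory mirrors the rewrite above: iterate entries newest-first,
// resolve the owning image via the top-layer map, and descend to the
// parent layer for non-empty entries.
func walkHistory(entries []historyEntry, top *layer, layers map[string]*layer, topLayerToImage map[string]string) []string {
	ids := []string{}
	cur := top
	for x := len(entries) - 1; x >= 0; x-- {
		id := "<missing>"
		if x == len(entries)-1 {
			id = "self"
		} else if cur != nil {
			if imgID, ok := topLayerToImage[cur.id]; ok {
				id = imgID
				delete(topLayerToImage, cur.id) // avoid reusing for later entries
			}
		}
		ids = append(ids, id)
		if cur != nil && cur.parent != "" && !entries[x].emptyLayer {
			cur = layers[cur.parent]
		}
	}
	return ids
}

func main() {
	l1 := &layer{id: "l1"}
	l2 := &layer{id: "l2", parent: "l1"}
	layers := map[string]*layer{"l1": l1, "l2": l2}
	fmt.Println(walkHistory([]historyEntry{{}, {}}, l2, layers, map[string]string{"l1": "base"}))
}
```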
diff --git a/libpod/image/prune.go b/libpod/image/prune.go
index 006cbdf22..f5be8ed50 100644
--- a/libpod/image/prune.go
+++ b/libpod/image/prune.go
@@ -2,23 +2,78 @@ package image
import (
"context"
+ "strings"
+ "time"
"github.com/containers/libpod/libpod/events"
+ "github.com/containers/libpod/pkg/timetype"
"github.com/containers/storage"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
+func generatePruneFilterFuncs(filter, filterValue string) (ImageFilter, error) {
+ switch filter {
+ case "label":
+ var filterArray = strings.SplitN(filterValue, "=", 2)
+ var filterKey = filterArray[0]
+ if len(filterArray) > 1 {
+ filterValue = filterArray[1]
+ } else {
+ filterValue = ""
+ }
+ return func(i *Image) bool {
+ labels, err := i.Labels(context.Background())
+ if err != nil {
+ return false
+ }
+ for labelKey, labelValue := range labels {
+ if labelKey == filterKey && ("" == filterValue || labelValue == filterValue) {
+ return true
+ }
+ }
+ return false
+ }, nil
+
+ case "until":
+ ts, err := timetype.GetTimestamp(filterValue, time.Now())
+ if err != nil {
+ return nil, err
+ }
+ seconds, nanoseconds, err := timetype.ParseTimestamps(ts, 0)
+ if err != nil {
+ return nil, err
+ }
+ until := time.Unix(seconds, nanoseconds)
+ return func(i *Image) bool {
+ if !until.IsZero() && i.Created().After(until) {
+ return true
+ }
+ return false
+ }, nil
+
+ }
+ return nil, nil
+}
+
// GetPruneImages returns a slice of images that have no names/unused
-func (ir *Runtime) GetPruneImages(all bool) ([]*Image, error) {
+func (ir *Runtime) GetPruneImages(all bool, filterFuncs []ImageFilter) ([]*Image, error) {
var (
pruneImages []*Image
)
+
allImages, err := ir.GetRWImages()
if err != nil {
return nil, err
}
for _, i := range allImages {
+ // Skip images that do not match every filter.
+ matchesFilters := true
+ for _, filterFunc := range filterFuncs {
+ if !filterFunc(i) {
+ matchesFilters = false
+ break
+ }
+ }
+ if !matchesFilters {
+ continue
+ }
+
if len(i.Names()) == 0 {
pruneImages = append(pruneImages, i)
continue
@@ -38,9 +93,25 @@ func (ir *Runtime) GetPruneImages(all bool) ([]*Image, error) {
// PruneImages prunes dangling and optionally all unused images from the local
// image store
-func (ir *Runtime) PruneImages(ctx context.Context, all bool) ([]string, error) {
- var prunedCids []string
- pruneImages, err := ir.GetPruneImages(all)
+func (ir *Runtime) PruneImages(ctx context.Context, all bool, filter []string) ([]string, error) {
+ var (
+ prunedCids []string
+ filterFuncs []ImageFilter
+ )
+ for _, f := range filter {
+ filterSplit := strings.SplitN(f, "=", 2)
+ if len(filterSplit) < 2 {
+ return nil, errors.Errorf("filter input must be in the form of filter=value: %s is invalid", f)
+ }
+
+ generatedFunc, err := generatePruneFilterFuncs(filterSplit[0], filterSplit[1])
+ if err != nil {
+ return nil, errors.Wrapf(err, "invalid filter")
+ }
+ filterFuncs = append(filterFuncs, generatedFunc)
+ }
+
+ pruneImages, err := ir.GetPruneImages(all, filterFuncs)
if err != nil {
return nil, errors.Wrap(err, "unable to get images to prune")
}
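
The two filter generators compose naturally. A standalone sketch of the same closures over a hypothetical `image` type, skipping the timestamp-string parsing that `timetype` handles in the real code:

```go
package main

import (
	"fmt"
	"strings"
	"time"
)

// image is a hypothetical stand-in with just enough surface for filtering.
type image struct {
	labels  map[string]string
	created time.Time
}

type imageFilter func(*image) bool

// labelFilter mirrors the "label" case above: match on key, and on
// value only when one was supplied.
func labelFilter(arg string) imageFilter {
	kv := strings.SplitN(arg, "=", 2)
	return func(i *image) bool {
		v, ok := i.labels[kv[0]]
		return ok && (len(kv) == 1 || v == kv[1])
	}
}

// untilFilter mirrors the "until" case: keep images created after the cutoff.
func untilFilter(cutoff time.Time) imageFilter {
	return func(i *image) bool { return i.created.After(cutoff) }
}

func main() {
	img := &image{labels: map[string]string{"app": "web"}, created: time.Now()}
	fmt.Println(labelFilter("app=web")(img), untilFilter(time.Now().Add(-time.Hour))(img))
}
```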
diff --git a/libpod/networking_linux.go b/libpod/networking_linux.go
index cba7b636a..a68338dbb 100644
--- a/libpod/networking_linux.go
+++ b/libpod/networking_linux.go
@@ -29,19 +29,40 @@ import (
// Get an OCICNI network config
func (r *Runtime) getPodNetwork(id, name, nsPath string, networks []string, ports []ocicni.PortMapping, staticIP net.IP, staticMAC net.HardwareAddr) ocicni.PodNetwork {
- defaultNetwork := r.netPlugin.GetDefaultNetworkName()
+ var networkKey string
+ if len(networks) > 0 {
+ // This is inconsistent for >1 network, but it's probably the
+ // best we can do.
+ networkKey = networks[0]
+ } else {
+ networkKey = r.netPlugin.GetDefaultNetworkName()
+ }
network := ocicni.PodNetwork{
Name: name,
Namespace: name, // TODO is there something else we should put here? We don't know about Kube namespaces
ID: id,
NetNS: nsPath,
RuntimeConfig: map[string]ocicni.RuntimeConfig{
- defaultNetwork: {PortMappings: ports},
+ networkKey: {PortMappings: ports},
},
}
+ // If we have extra networks, add them
+ if len(networks) > 0 {
+ network.Networks = make([]ocicni.NetAttachment, len(networks))
+ for i, netName := range networks {
+ network.Networks[i].Name = netName
+ }
+ }
+
if staticIP != nil || staticMAC != nil {
- network.Networks = []ocicni.NetAttachment{{Name: defaultNetwork}}
+ // For static IP or MAC, we need to populate networks even if
+ // it's just the default.
+ if len(networks) == 0 {
+ // If len(networks) == 0 this is guaranteed to be the
+ // default network.
+ network.Networks = []ocicni.NetAttachment{{Name: networkKey}}
+ }
var rt ocicni.RuntimeConfig = ocicni.RuntimeConfig{PortMappings: ports}
if staticIP != nil {
rt.IP = staticIP.String()
@@ -50,12 +71,7 @@ func (r *Runtime) getPodNetwork(id, name, nsPath string, networks []string, port
rt.MAC = staticMAC.String()
}
network.RuntimeConfig = map[string]ocicni.RuntimeConfig{
- defaultNetwork: rt,
- }
- } else {
- network.Networks = make([]ocicni.NetAttachment, len(networks))
- for i, netName := range networks {
- network.Networks[i].Name = netName
+ networkKey: rt,
}
}
diff --git a/libpod/oci_util.go b/libpod/oci_util.go
index c1a7f1c9a..53567d2d0 100644
--- a/libpod/oci_util.go
+++ b/libpod/oci_util.go
@@ -82,12 +82,21 @@ func bindPorts(ports []ocicni.PortMapping) ([]*os.File, error) {
}
func getOCIRuntimeError(runtimeMsg string) error {
- r := strings.ToLower(runtimeMsg)
- if match, _ := regexp.MatchString(".*permission denied.*|.*operation not permitted.*", r); match {
- return errors.Wrapf(define.ErrOCIRuntimePermissionDenied, "%s", strings.Trim(runtimeMsg, "\n"))
+ includeFullOutput := logrus.GetLevel() == logrus.DebugLevel
+
+ if match := regexp.MustCompile("(?i).*permission denied.*|.*operation not permitted.*").FindString(runtimeMsg); match != "" {
+ errStr := match
+ if includeFullOutput {
+ errStr = runtimeMsg
+ }
+ return errors.Wrapf(define.ErrOCIRuntimePermissionDenied, "%s", strings.Trim(errStr, "\n"))
}
- if match, _ := regexp.MatchString(".*executable file not found in.*|.*no such file or directory.*", r); match {
- return errors.Wrapf(define.ErrOCIRuntimeNotFound, "%s", strings.Trim(runtimeMsg, "\n"))
+ if match := regexp.MustCompile("(?i).*executable file not found in.*|.*no such file or directory.*").FindString(runtimeMsg); match != "" {
+ errStr := match
+ if includeFullOutput {
+ errStr = runtimeMsg
+ }
+ return errors.Wrapf(define.ErrOCIRuntimeNotFound, "%s", strings.Trim(errStr, "\n"))
}
return errors.Wrapf(define.ErrOCIRuntime, "%s", strings.Trim(runtimeMsg, "\n"))
}
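
The move from `MatchString` on a lowercased copy to a case-insensitive `(?i)` pattern also lets the matched fragment be reported on its own. A sketch of that classification, with `debug` standing in for the logrus level check:

```go
package main

import (
	"fmt"
	"regexp"
	"strings"
)

var permDenied = regexp.MustCompile("(?i).*permission denied.*|.*operation not permitted.*")

// classify mirrors the change above: match case-insensitively against
// the raw runtime output, and report either the matched fragment or,
// when debugging, the full message.
func classify(runtimeMsg string, debug bool) string {
	if match := permDenied.FindString(runtimeMsg); match != "" {
		errStr := match
		if debug {
			errStr = runtimeMsg
		}
		return "permission denied: " + strings.Trim(errStr, "\n")
	}
	return "generic OCI runtime error"
}

func main() {
	fmt.Println(classify("conmon: Operation not permitted\n", false))
}
```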
diff --git a/libpod/options.go b/libpod/options.go
index bfbbb9e2d..a9b775dc3 100644
--- a/libpod/options.go
+++ b/libpod/options.go
@@ -768,16 +768,8 @@ func WithIPCNSFrom(nsCtr *Container) CtrCreateOption {
return define.ErrCtrFinalized
}
- if !nsCtr.valid {
- return define.ErrCtrRemoved
- }
-
- if nsCtr.ID() == ctr.ID() {
- return errors.Wrapf(define.ErrInvalidArg, "must specify another container")
- }
-
- if ctr.config.Pod != "" && nsCtr.config.Pod != ctr.config.Pod {
- return errors.Wrapf(define.ErrInvalidArg, "container has joined pod %s and dependency container %s is not a member of the pod", ctr.config.Pod, nsCtr.ID())
+ if err := checkDependencyContainer(nsCtr, ctr); err != nil {
+ return err
}
ctr.config.IPCNsCtr = nsCtr.ID()
@@ -796,16 +788,8 @@ func WithMountNSFrom(nsCtr *Container) CtrCreateOption {
return define.ErrCtrFinalized
}
- if !nsCtr.valid {
- return define.ErrCtrRemoved
- }
-
- if nsCtr.ID() == ctr.ID() {
- return errors.Wrapf(define.ErrInvalidArg, "must specify another container")
- }
-
- if ctr.config.Pod != "" && nsCtr.config.Pod != ctr.config.Pod {
- return errors.Wrapf(define.ErrInvalidArg, "container has joined pod %s and dependency container %s is not a member of the pod", ctr.config.Pod, nsCtr.ID())
+ if err := checkDependencyContainer(nsCtr, ctr); err != nil {
+ return err
}
ctr.config.MountNsCtr = nsCtr.ID()
@@ -824,22 +808,14 @@ func WithNetNSFrom(nsCtr *Container) CtrCreateOption {
return define.ErrCtrFinalized
}
- if !nsCtr.valid {
- return define.ErrCtrRemoved
- }
-
- if nsCtr.ID() == ctr.ID() {
- return errors.Wrapf(define.ErrInvalidArg, "must specify another container")
+ if err := checkDependencyContainer(nsCtr, ctr); err != nil {
+ return err
}
if ctr.config.CreateNetNS {
return errors.Wrapf(define.ErrInvalidArg, "cannot join another container's net ns as we are making a new net ns")
}
- if ctr.config.Pod != "" && nsCtr.config.Pod != ctr.config.Pod {
- return errors.Wrapf(define.ErrInvalidArg, "container has joined pod %s and dependency container %s is not a member of the pod", ctr.config.Pod, nsCtr.ID())
- }
-
ctr.config.NetNsCtr = nsCtr.ID()
return nil
@@ -856,16 +832,8 @@ func WithPIDNSFrom(nsCtr *Container) CtrCreateOption {
return define.ErrCtrFinalized
}
- if !nsCtr.valid {
- return define.ErrCtrRemoved
- }
-
- if nsCtr.ID() == ctr.ID() {
- return errors.Wrapf(define.ErrInvalidArg, "must specify another container")
- }
-
- if ctr.config.Pod != "" && nsCtr.config.Pod != ctr.config.Pod {
- return errors.Wrapf(define.ErrInvalidArg, "container has joined pod %s and dependency container %s is not a member of the pod", ctr.config.Pod, nsCtr.ID())
+ if err := checkDependencyContainer(nsCtr, ctr); err != nil {
+ return err
}
if ctr.config.NoCgroups {
@@ -888,16 +856,8 @@ func WithUserNSFrom(nsCtr *Container) CtrCreateOption {
return define.ErrCtrFinalized
}
- if !nsCtr.valid {
- return define.ErrCtrRemoved
- }
-
- if nsCtr.ID() == ctr.ID() {
- return errors.Wrapf(define.ErrInvalidArg, "must specify another container")
- }
-
- if ctr.config.Pod != "" && nsCtr.config.Pod != ctr.config.Pod {
- return errors.Wrapf(define.ErrInvalidArg, "container has joined pod %s and dependency container %s is not a member of the pod", ctr.config.Pod, nsCtr.ID())
+ if err := checkDependencyContainer(nsCtr, ctr); err != nil {
+ return err
}
ctr.config.UserNsCtr = nsCtr.ID()
@@ -917,16 +877,8 @@ func WithUTSNSFrom(nsCtr *Container) CtrCreateOption {
return define.ErrCtrFinalized
}
- if !nsCtr.valid {
- return define.ErrCtrRemoved
- }
-
- if nsCtr.ID() == ctr.ID() {
- return errors.Wrapf(define.ErrInvalidArg, "must specify another container")
- }
-
- if ctr.config.Pod != "" && nsCtr.config.Pod != ctr.config.Pod {
- return errors.Wrapf(define.ErrInvalidArg, "container has joined pod %s and dependency container %s is not a member of the pod", ctr.config.Pod, nsCtr.ID())
+ if err := checkDependencyContainer(nsCtr, ctr); err != nil {
+ return err
}
ctr.config.UTSNsCtr = nsCtr.ID()
@@ -945,16 +897,8 @@ func WithCgroupNSFrom(nsCtr *Container) CtrCreateOption {
return define.ErrCtrFinalized
}
- if !nsCtr.valid {
- return define.ErrCtrRemoved
- }
-
- if nsCtr.ID() == ctr.ID() {
- return errors.Wrapf(define.ErrInvalidArg, "must specify another container")
- }
-
- if ctr.config.Pod != "" && nsCtr.config.Pod != ctr.config.Pod {
- return errors.Wrapf(define.ErrInvalidArg, "container has joined pod %s and dependency container %s is not a member of the pod", ctr.config.Pod, nsCtr.ID())
+ if err := checkDependencyContainer(nsCtr, ctr); err != nil {
+ return err
}
ctr.config.CgroupNsCtr = nsCtr.ID()
@@ -1041,8 +985,8 @@ func WithStaticIP(ip net.IP) CtrCreateOption {
return errors.Wrapf(define.ErrInvalidArg, "cannot set a static IP if the container is not creating a network namespace")
}
- if len(ctr.config.Networks) != 0 {
- return errors.Wrapf(define.ErrInvalidArg, "cannot set a static IP if joining additional CNI networks")
+ if len(ctr.config.Networks) > 1 {
+ return errors.Wrapf(define.ErrInvalidArg, "cannot set a static IP if joining more than 1 CNI network")
}
ctr.config.StaticIP = ip
@@ -1066,8 +1010,8 @@ func WithStaticMAC(mac net.HardwareAddr) CtrCreateOption {
return errors.Wrapf(define.ErrInvalidArg, "cannot set a static MAC if the container is not creating a network namespace")
}
- if len(ctr.config.Networks) != 0 {
- return errors.Wrapf(define.ErrInvalidArg, "cannot set a static MAC if joining additional CNI networks")
+ if len(ctr.config.Networks) > 1 {
+ return errors.Wrapf(define.ErrInvalidArg, "cannot set a static MAC if joining more than 1 CNI network")
}
ctr.config.StaticMAC = mac
diff --git a/libpod/reset.go b/libpod/reset.go
new file mode 100644
index 000000000..a35b476a4
--- /dev/null
+++ b/libpod/reset.go
@@ -0,0 +1,107 @@
+package libpod
+
+import (
+ "context"
+ "os"
+ "path/filepath"
+
+ "github.com/containers/libpod/libpod/define"
+ "github.com/containers/libpod/pkg/rootless"
+ "github.com/pkg/errors"
+ "github.com/sirupsen/logrus"
+)
+
+// Reset removes all storage
+func (r *Runtime) Reset(ctx context.Context) error {
+
+ pods, err := r.GetAllPods()
+ if err != nil {
+ return err
+ }
+ for _, p := range pods {
+ if err := r.RemovePod(ctx, p, true, true); err != nil {
+ if errors.Cause(err) == define.ErrNoSuchPod {
+ continue
+ }
+ logrus.Errorf("Error removing Pod %s: %v", p.ID(), err)
+ }
+ }
+
+ ctrs, err := r.GetAllContainers()
+ if err != nil {
+ return err
+ }
+
+ for _, c := range ctrs {
+ if err := r.RemoveContainer(ctx, c, true, true); err != nil {
+ if err := r.RemoveStorageContainer(c.ID(), true); err != nil {
+ if errors.Cause(err) == define.ErrNoSuchCtr {
+ continue
+ }
+ logrus.Errorf("Error removing container %s: %v", c.ID(), err)
+ }
+ }
+ }
+
+ if err := stopPauseProcess(); err != nil {
+ logrus.Errorf("Error stopping pause process: %v", err)
+ }
+
+ ir := r.ImageRuntime()
+ images, err := ir.GetImages()
+ if err != nil {
+ return err
+ }
+
+ for _, i := range images {
+ if err := i.Remove(ctx, true); err != nil {
+ if errors.Cause(err) == define.ErrNoSuchImage {
+ continue
+ }
+ logrus.Errorf("Error removing image %s: %v", i.ID(), err)
+ }
+ }
+ volumes, err := r.state.AllVolumes()
+ if err != nil {
+ return err
+ }
+ for _, v := range volumes {
+ if err := r.RemoveVolume(ctx, v, true); err != nil {
+ if errors.Cause(err) == define.ErrNoSuchVolume {
+ continue
+ }
+ logrus.Errorf("Error removing volume %s: %v", v.config.Name, err)
+ }
+ }
+
+ _, prevError := r.store.Shutdown(true)
+ if err := os.RemoveAll(r.store.GraphRoot()); err != nil {
+ if prevError != nil {
+ logrus.Error(prevError)
+ }
+ prevError = err
+ }
+ if err := os.RemoveAll(r.store.RunRoot()); err != nil {
+ if prevError != nil {
+ logrus.Error(prevError)
+ }
+ prevError = err
+ }
+ if err := os.RemoveAll(r.config.TmpDir); err != nil {
+ if prevError != nil {
+ logrus.Error(prevError)
+ }
+ prevError = err
+ }
+ if rootless.IsRootless() {
+ configPath := filepath.Join(os.Getenv("HOME"), ".config/containers")
+ if err := os.RemoveAll(configPath); err != nil {
+ if prevError != nil {
+ logrus.Error(prevError)
+ }
+ prevError = err
+ }
+ }
+
+ return prevError
+}
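
The tail of `Reset` uses a best-effort pattern worth noting: every removal is attempted, earlier failures are logged, and the most recent error is returned. A minimal sketch:

```go
package main

import (
	"fmt"
	"log"
	"os"
)

// removeAllBestEffort mirrors the pattern above: attempt every
// removal, log any earlier failure, and return the last error seen
// so the caller still learns that cleanup was incomplete.
func removeAllBestEffort(paths ...string) error {
	var prevError error
	for _, p := range paths {
		if err := os.RemoveAll(p); err != nil {
			if prevError != nil {
				log.Println(prevError)
			}
			prevError = err
		}
	}
	return prevError
}

func main() {
	// os.RemoveAll returns nil for paths that do not exist.
	fmt.Println(removeAllBestEffort(os.TempDir() + "/libpod-reset-example"))
}
```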
diff --git a/libpod/runtime.go b/libpod/runtime.go
index 42e6782e9..3873079ce 100644
--- a/libpod/runtime.go
+++ b/libpod/runtime.go
@@ -625,7 +625,8 @@ func (r *Runtime) refresh(alivePath string) error {
}
// Next refresh the state of all containers to recreate dirs and
- // namespaces, and all the pods to recreate cgroups
+ // namespaces, and all the pods to recreate cgroups.
+ // Containers, pods, and volumes must also reacquire their locks.
ctrs, err := r.state.AllContainers()
if err != nil {
return errors.Wrapf(err, "error retrieving all containers from state")
@@ -634,10 +635,14 @@ func (r *Runtime) refresh(alivePath string) error {
if err != nil {
return errors.Wrapf(err, "error retrieving all pods from state")
}
- // No locks are taken during pod and container refresh.
- // Furthermore, the pod and container refresh() functions are not
+ vols, err := r.state.AllVolumes()
+ if err != nil {
+ return errors.Wrapf(err, "error retrieving all volumes from state")
+ }
+ // No locks are taken during pod, volume, and container refresh.
+ // Furthermore, the pod/volume/container refresh() functions are not
// allowed to take locks themselves.
- // We cannot assume that any pod or container has a valid lock until
+ // We cannot assume that any pod/volume/container has a valid lock until
// after this function has returned.
// The runtime alive lock should suffice to provide mutual exclusion
// until this has run.
@@ -651,6 +656,11 @@ func (r *Runtime) refresh(alivePath string) error {
logrus.Errorf("Error refreshing pod %s: %v", pod.ID(), err)
}
}
+ for _, vol := range vols {
+ if err := vol.refresh(); err != nil {
+ logrus.Errorf("Error refreshing volume %s: %v", vol.Name(), err)
+ }
+ }
// Create a file indicating the runtime is alive and ready
file, err := os.OpenFile(alivePath, os.O_RDONLY|os.O_CREATE, 0644)
diff --git a/libpod/runtime_ctr.go b/libpod/runtime_ctr.go
index 7069d3494..ae401013c 100644
--- a/libpod/runtime_ctr.go
+++ b/libpod/runtime_ctr.go
@@ -489,32 +489,19 @@ func (r *Runtime) removeContainer(ctx context.Context, c *Container, force bool,
}
}
- var cleanupErr error
- // Remove the container from the state
- if c.config.Pod != "" {
- // If we're removing the pod, the container will be evicted
- // from the state elsewhere
- if !removePod {
- if err := r.state.RemoveContainerFromPod(pod, c); err != nil {
- cleanupErr = err
- }
- }
- } else {
- if err := r.state.RemoveContainer(c); err != nil {
- cleanupErr = err
- }
+ // Set ContainerStateRemoving and remove exec sessions
+ c.state.State = define.ContainerStateRemoving
+ c.state.ExecSessions = nil
+
+ if err := c.save(); err != nil {
+ return errors.Wrapf(err, "unable to set container %s removing state in database", c.ID())
}
- // Set container as invalid so it can no longer be used
- c.valid = false
+ var cleanupErr error
// Clean up network namespace, cgroups, mounts
if err := c.cleanup(ctx); err != nil {
- if cleanupErr == nil {
- cleanupErr = errors.Wrapf(err, "error cleaning up container %s", c.ID())
- } else {
- logrus.Errorf("cleanup network, cgroups, mounts: %v", err)
- }
+ cleanupErr = errors.Wrapf(err, "error cleaning up container %s", c.ID())
}
// Stop the container's storage
@@ -540,6 +527,29 @@ func (r *Runtime) removeContainer(ctx context.Context, c *Container, force bool,
}
}
+ // Remove the container from the state
+ if c.config.Pod != "" {
+ // If we're removing the pod, the container will be evicted
+ // from the state elsewhere
+ if !removePod {
+ if err := r.state.RemoveContainerFromPod(pod, c); err != nil {
+ if cleanupErr == nil {
+ cleanupErr = err
+ } else {
+ logrus.Errorf("Error removing container %s from database: %v", c.ID(), err)
+ }
+ }
+ }
+ } else {
+ if err := r.state.RemoveContainer(c); err != nil {
+ if cleanupErr == nil {
+ cleanupErr = err
+ } else {
+ logrus.Errorf("Error removing container %s from database: %v", c.ID(), err)
+ }
+ }
+ }
+
// Deallocate the container's lock
if err := c.lock.Free(); err != nil {
if cleanupErr == nil {
@@ -549,6 +559,9 @@ func (r *Runtime) removeContainer(ctx context.Context, c *Container, force bool,
}
}
+ // Set container as invalid so it can no longer be used
+ c.valid = false
+
c.newContainerEvent(events.Remove)
if !removeVolume {
diff --git a/libpod/runtime_img.go b/libpod/runtime_img.go
index f2784c07d..abd8b581d 100644
--- a/libpod/runtime_img.go
+++ b/libpod/runtime_img.go
@@ -160,10 +160,11 @@ func (r *Runtime) Import(ctx context.Context, source string, reference string, c
ic := v1.ImageConfig{}
if len(changes) > 0 {
- ic, err = util.GetImageConfig(changes)
+ config, err := util.GetImageConfig(changes)
if err != nil {
return "", errors.Wrapf(err, "error adding config changes to image %q", source)
}
+ ic = config.ImageConfig
}
hist := []v1.History{
diff --git a/libpod/runtime_migrate_unsupported.go b/libpod/runtime_migrate_unsupported.go
index 1a9e46fdc..e362cca63 100644
--- a/libpod/runtime_migrate_unsupported.go
+++ b/libpod/runtime_migrate_unsupported.go
@@ -9,3 +9,7 @@ import (
func (r *Runtime) migrate(ctx context.Context) error {
return nil
}
+
+func stopPauseProcess() error {
+ return nil
+}
diff --git a/libpod/util.go b/libpod/util.go
index bae2f4eb8..30e5cd4c3 100644
--- a/libpod/util.go
+++ b/libpod/util.go
@@ -206,3 +206,28 @@ func DefaultSeccompPath() (string, error) {
}
return config.SeccompDefaultPath, nil
}
+
+// checkDependencyContainer verifies that the given container can be used as a
+// dependency of another container.
+// Both the dependency to check and the container that will be using the
+// dependency must be passed in.
+// It is assumed that ctr is locked, and depCtr is unlocked.
+func checkDependencyContainer(depCtr, ctr *Container) error {
+ state, err := depCtr.State()
+ if err != nil {
+ return errors.Wrapf(err, "error accessing dependency container %s state", depCtr.ID())
+ }
+ if state == define.ContainerStateRemoving {
+ return errors.Wrapf(define.ErrCtrStateInvalid, "cannot use container %s as a dependency as it is being removed", depCtr.ID())
+ }
+
+ if depCtr.ID() == ctr.ID() {
+ return errors.Wrapf(define.ErrInvalidArg, "must specify another container")
+ }
+
+ if ctr.config.Pod != "" && depCtr.PodID() != ctr.config.Pod {
+ return errors.Wrapf(define.ErrInvalidArg, "container has joined pod %s and dependency container %s is not a member of the pod", ctr.config.Pod, depCtr.ID())
+ }
+
+ return nil
+}
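Since the real helper reads unexported container state, here is a minimal self-contained sketch of the same three checks, with a hypothetical fakeCtr type standing in for libpod's Container:

package main

import (
	"errors"
	"fmt"
)

// fakeCtr is a hypothetical stand-in for libpod's Container.
type fakeCtr struct {
	id       string
	podID    string
	removing bool
}

// checkDep mirrors the three checks in checkDependencyContainer.
func checkDep(depCtr, ctr *fakeCtr) error {
	if depCtr.removing {
		return fmt.Errorf("cannot use container %s as a dependency as it is being removed", depCtr.id)
	}
	if depCtr.id == ctr.id {
		return errors.New("must specify another container")
	}
	if ctr.podID != "" && depCtr.podID != ctr.podID {
		return fmt.Errorf("container has joined pod %s and dependency container %s is not a member of the pod", ctr.podID, depCtr.id)
	}
	return nil
}

func main() {
	dep := &fakeCtr{id: "dep1", podID: "podA"}
	ctr := &fakeCtr{id: "ctr1", podID: "podB"}
	fmt.Println(checkDep(dep, ctr)) // pod-membership error
}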
diff --git a/libpod/volume_internal.go b/libpod/volume_internal.go
index 42b935e7c..e89b3484d 100644
--- a/libpod/volume_internal.go
+++ b/libpod/volume_internal.go
@@ -5,6 +5,7 @@ import (
"path/filepath"
"github.com/containers/libpod/libpod/define"
+ "github.com/pkg/errors"
)
// Creates a new volume
@@ -46,3 +47,14 @@ func (v *Volume) update() error {
func (v *Volume) save() error {
return v.runtime.state.SaveVolume(v)
}
+
+// Refresh volume state after a restart.
+func (v *Volume) refresh() error {
+ lock, err := v.runtime.lockManager.AllocateAndRetrieveLock(v.config.LockID)
+ if err != nil {
+ return errors.Wrapf(err, "error acquiring lock %d for volume %s", v.config.LockID, v.Name())
+ }
+ v.lock = lock
+
+ return nil
+}
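The point of refresh is that lock IDs are persisted in the database while the in-memory lock table is wiped by a reboot, so the volume must reclaim its slot. A rough sketch with hypothetical Lock and LockManager interfaces in place of libpod's lock package (AllocateAndRetrieveLock is the real call used above):

package main

import "fmt"

// Lock and LockManager are hypothetical stand-ins for libpod's lock package.
type Lock interface {
	Lock()
	Unlock()
}

type LockManager interface {
	AllocateAndRetrieveLock(id uint32) (Lock, error)
}

type volume struct {
	name   string
	lockID uint32 // persisted in the DB, so it survives reboots
	lock   Lock
	mgr    LockManager
}

// refresh reattaches the volume's persisted lock ID to a live lock.
func (v *volume) refresh() error {
	l, err := v.mgr.AllocateAndRetrieveLock(v.lockID)
	if err != nil {
		return fmt.Errorf("error acquiring lock %d for volume %s: %v", v.lockID, v.name, err)
	}
	v.lock = l
	return nil
}

func main() {
	fmt.Println("refresh() reattaches a persisted lock ID to a live lock")
}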
diff --git a/pkg/adapter/containers.go b/pkg/adapter/containers.go
index 287bd8474..6d139d0bf 100644
--- a/pkg/adapter/containers.go
+++ b/pkg/adapter/containers.go
@@ -79,8 +79,18 @@ func (r *LocalRuntime) StopContainers(ctx context.Context, cli *cliconfig.StopVa
}
logrus.Debugf("Setting maximum stop workers to %d", maxWorkers)
- ctrs, err := shortcuts.GetContainersByContext(cli.All, cli.Latest, cli.InputArgs, r.Runtime)
- if err != nil {
+ names := cli.InputArgs
+ for _, cidFile := range cli.CIDFiles {
+ content, err := ioutil.ReadFile(cidFile)
+ if err != nil {
+ return nil, nil, errors.Wrap(err, "error reading CIDFile")
+ }
+ id := strings.Split(string(content), "\n")[0]
+ names = append(names, id)
+ }
+
+ ctrs, err := shortcuts.GetContainersByContext(cli.All, cli.Latest, names, r.Runtime)
+ if err != nil && !(cli.Ignore && errors.Cause(err) == define.ErrNoSuchCtr) {
return nil, nil, err
}
@@ -203,8 +213,18 @@ func (r *LocalRuntime) RemoveContainers(ctx context.Context, cli *cliconfig.RmVa
return ok, failures, nil
}
- ctrs, err := shortcuts.GetContainersByContext(cli.All, cli.Latest, cli.InputArgs, r.Runtime)
- if err != nil {
+ names := cli.InputArgs
+ for _, cidFile := range cli.CIDFiles {
+ content, err := ioutil.ReadFile(cidFile)
+ if err != nil {
+ return nil, nil, errors.Wrap(err, "error reading CIDFile")
+ }
+ id := strings.Split(string(content), "\n")[0]
+ names = append(names, id)
+ }
+
+ ctrs, err := shortcuts.GetContainersByContext(cli.All, cli.Latest, names, r.Runtime)
+ if err != nil && !(cli.Ignore && errors.Cause(err) == define.ErrNoSuchCtr) {
// Failed to get containers. If force is specified, get the containers ID
// and evict them
if !cli.Force {
@@ -215,6 +235,10 @@ func (r *LocalRuntime) RemoveContainers(ctx context.Context, cli *cliconfig.RmVa
logrus.Debugf("Evicting container %q", ctr)
id, err := r.EvictContainer(ctx, ctr, cli.Volumes)
if err != nil {
+ if cli.Ignore && errors.Cause(err) == define.ErrNoSuchCtr {
+ logrus.Debugf("Ignoring error (--ignore): %v", err)
+ continue
+ }
failures[ctr] = errors.Wrapf(err, "Failed to evict container: %q", id)
continue
}
@@ -232,6 +256,10 @@ func (r *LocalRuntime) RemoveContainers(ctx context.Context, cli *cliconfig.RmVa
Fn: func() error {
err := r.RemoveContainer(ctx, c, cli.Force, cli.Volumes)
if err != nil {
+ if cli.Ignore && errors.Cause(err) == define.ErrNoSuchCtr {
+ logrus.Debugf("Ignoring error (--ignore): %v", err)
+ return nil
+ }
logrus.Debugf("Failed to remove container %s: %s", c.ID(), err.Error())
}
return err
@@ -339,6 +367,23 @@ func (r *LocalRuntime) CreateContainer(ctx context.Context, c *cliconfig.CreateV
return ctr.ID(), nil
}
+// selectDetachKeys picks the detach keys to use from the user's flag, the config file, or the default value
+func (r *LocalRuntime) selectDetachKeys(flagValue string) (string, error) {
+ if flagValue != "" {
+ return flagValue, nil
+ }
+
+ config, err := r.GetConfig()
+ if err != nil {
+ return "", errors.Wrapf(err, "unable to retrive runtime config")
+ }
+ if config.DetachKeys != "" {
+ return config.DetachKeys, nil
+ }
+
+ return define.DefaultDetachKeys, nil
+}
+
// Run a libpod container
func (r *LocalRuntime) Run(ctx context.Context, c *cliconfig.RunValues, exitCode int) (int, error) {
results := shared.NewIntermediateLayer(&c.PodmanCommand, false)
@@ -400,8 +445,13 @@ func (r *LocalRuntime) Run(ctx context.Context, c *cliconfig.RunValues, exitCode
}
}
+ keys, err := r.selectDetachKeys(c.String("detach-keys"))
+ if err != nil {
+ return exitCode, err
+ }
+
// if the container was created as part of a pod, also start its dependencies, if any.
- if err := StartAttachCtr(ctx, ctr, outputStream, errorStream, inputStream, c.String("detach-keys"), c.Bool("sig-proxy"), true, c.IsSet("pod")); err != nil {
+ if err := StartAttachCtr(ctx, ctr, outputStream, errorStream, inputStream, keys, c.Bool("sig-proxy"), true, c.IsSet("pod")); err != nil {
// We've manually detached from the container
// Do not perform cleanup, or wait for container exit code
// Just exit immediately
@@ -433,7 +483,8 @@ func (r *LocalRuntime) Run(ctx context.Context, c *cliconfig.RunValues, exitCode
if c.IsSet("rm") {
if err := r.Runtime.RemoveContainer(ctx, ctr, false, true); err != nil {
- if errors.Cause(err) == define.ErrNoSuchCtr {
+ if errors.Cause(err) == define.ErrNoSuchCtr ||
+ errors.Cause(err) == define.ErrCtrRemoved {
logrus.Warnf("Container %s does not exist: %v", ctr.ID(), err)
} else {
logrus.Errorf("Error removing container %s: %v", ctr.ID(), err)
@@ -483,8 +534,14 @@ func (r *LocalRuntime) Attach(ctx context.Context, c *cliconfig.AttachValues) er
if c.NoStdin {
inputStream = nil
}
+
+ keys, err := r.selectDetachKeys(c.DetachKeys)
+ if err != nil {
+ return err
+ }
+
// If the container is in a pod, also set to recursively start dependencies
- if err := StartAttachCtr(ctx, ctr, os.Stdout, os.Stderr, inputStream, c.DetachKeys, c.SigProxy, false, ctr.PodID() != ""); err != nil && errors.Cause(err) != define.ErrDetach {
+ if err := StartAttachCtr(ctx, ctr, os.Stdout, os.Stderr, inputStream, keys, c.SigProxy, false, ctr.PodID() != ""); err != nil && errors.Cause(err) != define.ErrDetach {
return errors.Wrapf(err, "error attaching to container %s", ctr.ID())
}
return nil
@@ -617,9 +674,14 @@ func (r *LocalRuntime) Start(ctx context.Context, c *cliconfig.StartValues, sigP
}
}
+ keys, err := r.selectDetachKeys(c.DetachKeys)
+ if err != nil {
+ return exitCode, err
+ }
+
// attach to the container and also start it if not already running
// If the container is in a pod, also set to recursively start dependencies
- err = StartAttachCtr(ctx, ctr.Container, os.Stdout, os.Stderr, inputStream, c.DetachKeys, sigProxy, !ctrRunning, ctr.PodID() != "")
+ err = StartAttachCtr(ctx, ctr.Container, os.Stdout, os.Stderr, inputStream, keys, sigProxy, !ctrRunning, ctr.PodID() != "")
if errors.Cause(err) == define.ErrDetach {
// User manually detached
// Exit cleanly immediately
@@ -976,21 +1038,40 @@ func (r *LocalRuntime) ExecContainer(ctx context.Context, cli *cliconfig.ExecVal
streams.AttachOutput = true
streams.AttachError = true
- ec, err = ExecAttachCtr(ctx, ctr.Container, cli.Tty, cli.Privileged, env, cmd, cli.User, cli.Workdir, streams, uint(cli.PreserveFDs), cli.DetachKeys)
+ keys, err := r.selectDetachKeys(cli.DetachKeys)
+ if err != nil {
+ return ec, err
+ }
+
+ ec, err = ExecAttachCtr(ctx, ctr.Container, cli.Tty, cli.Privileged, env, cmd, cli.User, cli.Workdir, streams, uint(cli.PreserveFDs), keys)
return define.TranslateExecErrorToExitCode(ec, err), err
}
// Prune removes stopped containers
-func (r *LocalRuntime) Prune(ctx context.Context, maxWorkers int, force bool) ([]string, map[string]error, error) {
+func (r *LocalRuntime) Prune(ctx context.Context, maxWorkers int, force bool, filters []string) ([]string, map[string]error, error) {
var (
- ok = []string{}
- failures = map[string]error{}
- err error
+ ok = []string{}
+ failures = map[string]error{}
+ err error
+ filterFunc []libpod.ContainerFilter
)
logrus.Debugf("Setting maximum rm workers to %d", maxWorkers)
- filter := func(c *libpod.Container) bool {
+ for _, filter := range filters {
+ filterSplit := strings.SplitN(filter, "=", 2)
+ if len(filterSplit) < 2 {
+ return ok, failures, errors.Errorf("filter input must be in the form of filter=value: %s is invalid", filter)
+ }
+
+ f, err := shared.GenerateContainerFilterFuncs(filterSplit[0], filterSplit[1], r.Runtime)
+ if err != nil {
+ return ok, failures, err
+ }
+ filterFunc = append(filterFunc, f)
+ }
+
+ containerStateFilter := func(c *libpod.Container) bool {
state, err := c.State()
if err != nil {
logrus.Error(err)
@@ -1004,7 +1085,9 @@ func (r *LocalRuntime) Prune(ctx context.Context, maxWorkers int, force bool) ([
}
return false
}
- delContainers, err := r.Runtime.GetContainers(filter)
+ filterFunc = append(filterFunc, containerStateFilter)
+
+ delContainers, err := r.Runtime.GetContainers(filterFunc...)
if err != nil {
return ok, failures, err
}
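The detach-key resolution threaded through Run, Attach, Start, and Exec above is a simple three-level fallback. A self-contained sketch of the same precedence (the constant below is an illustrative stand-in for define.DefaultDetachKeys):

package main

import "fmt"

// defaultDetachKeys is an illustrative stand-in for define.DefaultDetachKeys.
const defaultDetachKeys = "ctrl-p,ctrl-q"

// selectDetachKeys resolves detach keys with the same precedence as the
// new helper: CLI flag, then config file, then built-in default.
func selectDetachKeys(flagValue, configValue string) string {
	if flagValue != "" {
		return flagValue
	}
	if configValue != "" {
		return configValue
	}
	return defaultDetachKeys
}

func main() {
	fmt.Println(selectDetachKeys("", ""))                 // ctrl-p,ctrl-q
	fmt.Println(selectDetachKeys("", "ctrl-a,a"))         // ctrl-a,a
	fmt.Println(selectDetachKeys("ctrl-x,x", "ctrl-a,a")) // ctrl-x,x
}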
diff --git a/pkg/adapter/containers_remote.go b/pkg/adapter/containers_remote.go
index e34b8ffd9..36db4af68 100644
--- a/pkg/adapter/containers_remote.go
+++ b/pkg/adapter/containers_remote.go
@@ -922,7 +922,7 @@ func (r *LocalRuntime) Top(cli *cliconfig.TopValues) ([]string, error) {
}
// Prune removes stopped containers
-func (r *LocalRuntime) Prune(ctx context.Context, maxWorkers int, force bool) ([]string, map[string]error, error) {
+func (r *LocalRuntime) Prune(ctx context.Context, maxWorkers int, force bool, filter []string) ([]string, map[string]error, error) {
var (
ok = []string{}
diff --git a/pkg/adapter/pods.go b/pkg/adapter/pods.go
index eafcc5e9b..a726153c0 100644
--- a/pkg/adapter/pods.go
+++ b/pkg/adapter/pods.go
@@ -15,8 +15,10 @@ import (
"github.com/containers/libpod/cmd/podman/cliconfig"
"github.com/containers/libpod/cmd/podman/shared"
"github.com/containers/libpod/libpod"
+ "github.com/containers/libpod/libpod/define"
"github.com/containers/libpod/libpod/image"
"github.com/containers/libpod/pkg/adapter/shortcuts"
+ ann "github.com/containers/libpod/pkg/annotations"
ns "github.com/containers/libpod/pkg/namespaces"
createconfig "github.com/containers/libpod/pkg/spec"
"github.com/containers/libpod/pkg/util"
@@ -75,7 +77,7 @@ func (r *LocalRuntime) PrunePods(ctx context.Context, cli *cliconfig.PodPruneVal
pool.Add(shared.Job{
ID: p.ID(),
Fn: func() error {
- err := r.Runtime.RemovePod(ctx, p, cli.Force, cli.Force)
+ err := r.Runtime.RemovePod(ctx, p, true, cli.Force)
if err != nil {
logrus.Debugf("Failed to remove pod %s: %s", p.ID(), err.Error())
}
@@ -93,13 +95,13 @@ func (r *LocalRuntime) RemovePods(ctx context.Context, cli *cliconfig.PodRmValue
podids []string
)
pods, err := shortcuts.GetPodsByContext(cli.All, cli.Latest, cli.InputArgs, r.Runtime)
- if err != nil {
+ if err != nil && !(cli.Ignore && errors.Cause(err) == define.ErrNoSuchPod) {
errs = append(errs, err)
return nil, errs
}
for _, p := range pods {
- if err := r.Runtime.RemovePod(ctx, p, cli.Force, cli.Force); err != nil {
+ if err := r.Runtime.RemovePod(ctx, p, true, cli.Force); err != nil {
errs = append(errs, err)
} else {
podids = append(podids, p.ID())
@@ -150,7 +152,7 @@ func (r *LocalRuntime) StopPods(ctx context.Context, cli *cliconfig.PodStopValue
podids []string
)
pods, err := shortcuts.GetPodsByContext(cli.All, cli.Latest, cli.InputArgs, r.Runtime)
- if err != nil {
+ if err != nil && !(cli.Ignore && errors.Cause(err) == define.ErrNoSuchPod) {
errs = append(errs, err)
return nil, errs
}
@@ -595,12 +597,17 @@ func (r *LocalRuntime) PlayKubeYAML(ctx context.Context, c *cliconfig.KubePlayVa
volumes[volume.Name] = hostPath.Path
}
+ seccompPaths, err := initializeSeccompPaths(podYAML.ObjectMeta.Annotations)
+ if err != nil {
+ return nil, err
+ }
+
for _, container := range podYAML.Spec.Containers {
newImage, err := r.ImageRuntime().New(ctx, container.Image, c.SignaturePolicy, c.Authfile, writer, &dockerRegistryOptions, image.SigningOptions{}, nil, util.PullImageMissing)
if err != nil {
return nil, err
}
- createConfig, err := kubeContainerToCreateConfig(ctx, container, r.Runtime, newImage, namespaces, volumes, pod.ID())
+ createConfig, err := kubeContainerToCreateConfig(ctx, container, r.Runtime, newImage, namespaces, volumes, pod.ID(), podInfraID, seccompPaths)
if err != nil {
return nil, err
}
@@ -719,7 +726,7 @@ func setupSecurityContext(securityConfig *createconfig.SecurityConfig, userConfi
}
// kubeContainerToCreateConfig takes a v1.Container and returns a createconfig describing a container
-func kubeContainerToCreateConfig(ctx context.Context, containerYAML v1.Container, runtime *libpod.Runtime, newImage *image.Image, namespaces map[string]string, volumes map[string]string, podID string) (*createconfig.CreateConfig, error) {
+func kubeContainerToCreateConfig(ctx context.Context, containerYAML v1.Container, runtime *libpod.Runtime, newImage *image.Image, namespaces map[string]string, volumes map[string]string, podID, infraID string, seccompPaths *kubeSeccompPaths) (*createconfig.CreateConfig, error) {
var (
containerConfig createconfig.CreateConfig
pidConfig createconfig.PidConfig
@@ -751,11 +758,7 @@ func kubeContainerToCreateConfig(ctx context.Context, containerYAML v1.Container
setupSecurityContext(&securityConfig, &userConfig, containerYAML)
- var err error
- containerConfig.Security.SeccompProfilePath, err = libpod.DefaultSeccompPath()
- if err != nil {
- return nil, err
- }
+ securityConfig.SeccompProfilePath = seccompPaths.findForContainer(containerConfig.Name)
containerConfig.Command = []string{}
if imageData != nil && imageData.Config != nil {
@@ -800,6 +803,13 @@ func kubeContainerToCreateConfig(ctx context.Context, containerYAML v1.Container
// Set default environment variables and incorporate data from image, if necessary
envs := shared.EnvVariablesFromData(imageData)
+ annotations := make(map[string]string)
+ if infraID != "" {
+ annotations[ann.SandboxID] = infraID
+ annotations[ann.ContainerType] = ann.ContainerTypeContainer
+ }
+ containerConfig.Annotations = annotations
+
// Environment Variables
for _, e := range containerYAML.Env {
envs[e.Name] = e.Value
@@ -818,3 +828,80 @@ func kubeContainerToCreateConfig(ctx context.Context, containerYAML v1.Container
}
return &containerConfig, nil
}
+
+// kubeSeccompPaths holds information about a pod YAML's seccomp configuration.
+// It holds both container and pod seccomp paths.
+type kubeSeccompPaths struct {
+ containerPaths map[string]string
+ podPath string
+}
+
+// findForContainer checks whether a container has a seccomp path configured for it;
+// if not, it returns the podPath, which should always have a value.
+func (k *kubeSeccompPaths) findForContainer(ctrName string) string {
+ if path, ok := k.containerPaths[ctrName]; ok {
+ return path
+ }
+ return k.podPath
+}
+
+// initializeSeccompPaths takes annotations from the pod object metadata and finds annotations pertaining to seccomp.
+// It parses both pod- and container-level annotations.
+func initializeSeccompPaths(annotations map[string]string) (*kubeSeccompPaths, error) {
+ seccompPaths := &kubeSeccompPaths{containerPaths: make(map[string]string)}
+ var err error
+ if annotations != nil {
+ for annKeyValue, seccomp := range annotations {
+ // check if it is prefaced with container.seccomp.security.alpha.kubernetes.io/
+ prefixAndCtr := strings.Split(annKeyValue, "/")
+ if prefixAndCtr[0]+"/" != v1.SeccompContainerAnnotationKeyPrefix {
+ continue
+ } else if len(prefixAndCtr) != 2 {
+ // this could be caused by a user inputting either of
+ // container.seccomp.security.alpha.kubernetes.io{,/}
+ // both of which are invalid
+ return nil, errors.Errorf("Invalid seccomp path: %s", prefixAndCtr[0])
+ }
+
+ path, err := verifySeccompPath(seccomp)
+ if err != nil {
+ return nil, err
+ }
+ seccompPaths.containerPaths[prefixAndCtr[1]] = path
+ }
+
+ podSeccomp, ok := annotations[v1.SeccompPodAnnotationKey]
+ if ok {
+ seccompPaths.podPath, err = verifySeccompPath(podSeccomp)
+ } else {
+ seccompPaths.podPath, err = libpod.DefaultSeccompPath()
+ }
+ if err != nil {
+ return nil, err
+ }
+ }
+ return seccompPaths, nil
+}
+
+// verifySeccompPath takes a path and checks whether it is a default, unconfined, or a path
+// the available options are parsed as defined in https://kubernetes.io/docs/concepts/policy/pod-security-policy/#seccomp
+func verifySeccompPath(path string) (string, error) {
+ switch path {
+ case v1.DeprecatedSeccompProfileDockerDefault:
+ fallthrough
+ case v1.SeccompProfileRuntimeDefault:
+ return libpod.DefaultSeccompPath()
+ case "unconfined":
+ return path, nil
+ default:
+ // TODO we have an inconsistency here
+ // k8s parses `localhost/<path>` which is found at `<seccomp_root>`
+ // we currently parse `localhost:<seccomp_root>/<path>`
+ // to fully conform, we need to find a good location for the seccomp root
+ parts := strings.Split(path, ":")
+ if parts[0] == "localhost" {
+ return parts[1], nil
+ }
+ return "", errors.Errorf("invalid seccomp path: %s", path)
+ }
+}
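To see how the annotation keys are matched end to end, here is a compressed sketch of the lookup flow; the prefix constant is an illustrative copy of v1.SeccompContainerAnnotationKeyPrefix and the annotation values are made up:

package main

import (
	"fmt"
	"strings"
)

// ctrPrefix is an illustrative copy of the k8s container seccomp prefix.
const ctrPrefix = "container.seccomp.security.alpha.kubernetes.io/"

func main() {
	annotations := map[string]string{
		ctrPrefix + "web":                          "localhost:/usr/share/containers/web.json",
		"seccomp.security.alpha.kubernetes.io/pod": "runtime/default",
	}
	paths := map[string]string{}
	for key, val := range annotations {
		parts := strings.Split(key, "/")
		// same prefix match as initializeSeccompPaths
		if parts[0]+"/" != ctrPrefix || len(parts) != 2 {
			continue
		}
		// strip the localhost: transport, as verifySeccompPath does
		if p := strings.SplitN(val, ":", 2); p[0] == "localhost" {
			val = p[1]
		}
		paths[parts[1]] = val
	}
	fmt.Println(paths["web"]) // /usr/share/containers/web.json
}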
diff --git a/pkg/adapter/reset.go b/pkg/adapter/reset.go
new file mode 100644
index 000000000..0decc3d15
--- /dev/null
+++ b/pkg/adapter/reset.go
@@ -0,0 +1,13 @@
+// +build !remoteclient
+
+package adapter
+
+import (
+ "context"
+)
+
+// Reset resets the container storage back to its initial state,
+// removing all pods, containers, images and volumes.
+func (r *LocalRuntime) Reset() error {
+ return r.Runtime.Reset(context.TODO())
+}
diff --git a/pkg/adapter/reset_remote.go b/pkg/adapter/reset_remote.go
new file mode 100644
index 000000000..663fab639
--- /dev/null
+++ b/pkg/adapter/reset_remote.go
@@ -0,0 +1,12 @@
+// +build remoteclient
+
+package adapter
+
+import (
+ "github.com/containers/libpod/cmd/podman/varlink"
+)
+
+// Reset resets the remote storage back to its initial state, removing all pods, containers, images and volumes via the varlink Reset call
+func (r RemoteRuntime) Reset() error {
+ return iopodman.Reset().Call(r.Conn)
+}
diff --git a/pkg/adapter/runtime.go b/pkg/adapter/runtime.go
index 81a43853c..069283bde 100644
--- a/pkg/adapter/runtime.go
+++ b/pkg/adapter/runtime.go
@@ -27,7 +27,7 @@ import (
"github.com/containers/libpod/pkg/util"
"github.com/containers/storage/pkg/archive"
"github.com/pkg/errors"
- "k8s.io/api/core/v1"
+ v1 "k8s.io/api/core/v1"
)
// LocalRuntime describes a typical libpod runtime
@@ -147,8 +147,8 @@ func (r *LocalRuntime) RemoveImage(ctx context.Context, img *ContainerImage, for
}
// PruneImages is wrapper into PruneImages within the image pkg
-func (r *LocalRuntime) PruneImages(ctx context.Context, all bool) ([]string, error) {
- return r.ImageRuntime().PruneImages(ctx, all)
+func (r *LocalRuntime) PruneImages(ctx context.Context, all bool, filter []string) ([]string, error) {
+ return r.ImageRuntime().PruneImages(ctx, all, filter)
}
// Export is a wrapper to container export to a tarfile
diff --git a/pkg/adapter/runtime_remote.go b/pkg/adapter/runtime_remote.go
index 01bc8e454..f9232897c 100644
--- a/pkg/adapter/runtime_remote.go
+++ b/pkg/adapter/runtime_remote.go
@@ -136,21 +136,22 @@ type ContainerImage struct {
}
type remoteImage struct {
- ID string
- Labels map[string]string
- RepoTags []string
- RepoDigests []string
- Parent string
- Size int64
- Created time.Time
- InputName string
- Names []string
- Digest digest.Digest
- Digests []digest.Digest
- isParent bool
- Runtime *LocalRuntime
- TopLayer string
- ReadOnly bool
+ ID string
+ Labels map[string]string
+ RepoTags []string
+ RepoDigests []string
+ Parent string
+ Size int64
+ Created time.Time
+ InputName string
+ Names []string
+ Digest digest.Digest
+ Digests []digest.Digest
+ isParent bool
+ Runtime *LocalRuntime
+ TopLayer string
+ ReadOnly bool
+ NamesHistory []string
}
// Container ...
@@ -232,21 +233,22 @@ func imageInListToContainerImage(i iopodman.Image, name string, runtime *LocalRu
digests = append(digests, digest.Digest(d))
}
ri := remoteImage{
- InputName: name,
- ID: i.Id,
- Digest: digest.Digest(i.Digest),
- Digests: digests,
- Labels: i.Labels,
- RepoTags: i.RepoTags,
- RepoDigests: i.RepoTags,
- Parent: i.ParentId,
- Size: i.Size,
- Created: created,
- Names: i.RepoTags,
- isParent: i.IsParent,
- Runtime: runtime,
- TopLayer: i.TopLayer,
- ReadOnly: i.ReadOnly,
+ InputName: name,
+ ID: i.Id,
+ Digest: digest.Digest(i.Digest),
+ Digests: digests,
+ Labels: i.Labels,
+ RepoTags: i.RepoTags,
+ RepoDigests: i.RepoTags,
+ Parent: i.ParentId,
+ Size: i.Size,
+ Created: created,
+ Names: i.RepoTags,
+ isParent: i.IsParent,
+ Runtime: runtime,
+ TopLayer: i.TopLayer,
+ ReadOnly: i.ReadOnly,
+ NamesHistory: i.History,
}
return &ContainerImage{ri}, nil
}
@@ -337,6 +339,11 @@ func (ci *ContainerImage) Names() []string {
return ci.remoteImage.Names
}
+// NamesHistory returns a string array of names previously associated with the image
+func (ci *ContainerImage) NamesHistory() []string {
+ return ci.remoteImage.NamesHistory
+}
+
// Created returns the time the image was created
func (ci *ContainerImage) Created() time.Time {
return ci.remoteImage.Created
@@ -415,8 +422,8 @@ func (ci *ContainerImage) History(ctx context.Context) ([]*image.History, error)
}
// PruneImages is the wrapper call for a remote-client to prune images
-func (r *LocalRuntime) PruneImages(ctx context.Context, all bool) ([]string, error) {
- return iopodman.ImagesPrune().Call(r.Conn, all)
+func (r *LocalRuntime) PruneImages(ctx context.Context, all bool, filter []string) ([]string, error) {
+ return iopodman.ImagesPrune().Call(r.Conn, all, filter)
}
// Export is a wrapper to container export to a tarfile
diff --git a/pkg/adapter/shortcuts/shortcuts.go b/pkg/adapter/shortcuts/shortcuts.go
index 3e4eff555..4f6cfd6a3 100644
--- a/pkg/adapter/shortcuts/shortcuts.go
+++ b/pkg/adapter/shortcuts/shortcuts.go
@@ -2,9 +2,11 @@ package shortcuts
import (
"github.com/containers/libpod/libpod"
+ "github.com/sirupsen/logrus"
)
-// GetPodsByContext gets pods whether all, latest, or a slice of names/ids
+// GetPodsByContext returns a slice of pods. Note that all, latest and pods are
+// mutually exclusive arguments.
func GetPodsByContext(all, latest bool, pods []string, runtime *libpod.Runtime) ([]*libpod.Pod, error) {
var outpods []*libpod.Pod
if all {
@@ -18,17 +20,24 @@ func GetPodsByContext(all, latest bool, pods []string, runtime *libpod.Runtime)
outpods = append(outpods, p)
return outpods, nil
}
+ var err error
for _, p := range pods {
- pod, err := runtime.LookupPod(p)
- if err != nil {
- return nil, err
+ pod, e := runtime.LookupPod(p)
+ if e != nil {
+ // Log all errors here, so callers don't need to.
+ logrus.Debugf("Error looking up pod %q: %v", p, e)
+ if err == nil {
+ err = e
+ }
+ } else {
+ outpods = append(outpods, pod)
}
- outpods = append(outpods, pod)
}
- return outpods, nil
+ return outpods, err
}
// GetContainersByContext gets containers whether all, latest, or a slice of names/ids
+// is specified.
func GetContainersByContext(all, latest bool, names []string, runtime *libpod.Runtime) (ctrs []*libpod.Container, err error) {
var ctr *libpod.Container
ctrs = []*libpod.Container{}
@@ -41,10 +50,15 @@ func GetContainersByContext(all, latest bool, names []string, runtime *libpod.Ru
} else {
for _, n := range names {
ctr, e := runtime.LookupContainer(n)
- if e != nil && err == nil {
- err = e
+ if e != nil {
+ // Log all errors here, so callers don't need to.
+ logrus.Debugf("Error looking up container %q: %v", n, e)
+ if err == nil {
+ err = e
+ }
+ } else {
+ ctrs = append(ctrs, ctr)
}
- ctrs = append(ctrs, ctr)
}
}
return
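Both lookup loops now share one pattern: log every failure, remember only the first error, and still return whatever was found. A minimal sketch of that pattern with a hypothetical lookup function standing in for runtime.LookupContainer:

package main

import (
	"fmt"
	"log"
)

// lookupAll returns every item that resolves, plus the first lookup error.
func lookupAll(names []string, lookup func(string) (string, error)) ([]string, error) {
	var out []string
	var firstErr error
	for _, n := range names {
		item, err := lookup(n)
		if err != nil {
			log.Printf("Error looking up %q: %v", n, err) // log all errors here
			if firstErr == nil {
				firstErr = err
			}
			continue
		}
		out = append(out, item)
	}
	return out, firstErr
}

func main() {
	lookup := func(n string) (string, error) {
		if n == "missing" {
			return "", fmt.Errorf("no such container %q", n)
		}
		return n + "-id", nil
	}
	got, err := lookupAll([]string{"a", "missing", "b"}, lookup)
	fmt.Println(got, err) // [a-id b-id] no such container "missing"
}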
diff --git a/pkg/rootless/rootless_linux.c b/pkg/rootless/rootless_linux.c
index 9604de638..193c788c0 100644
--- a/pkg/rootless/rootless_linux.c
+++ b/pkg/rootless/rootless_linux.c
@@ -19,24 +19,31 @@
#include <sys/select.h>
#include <stdio.h>
-#ifndef RENAME_NOREPLACE
-# define RENAME_NOREPLACE (1 << 0)
-
-int renameat2 (int olddirfd, const char *oldpath, int newdirfd, const char *newpath, unsigned int flags)
+int rename_noreplace (int olddirfd, const char *oldpath, int newdirfd, const char *newpath)
{
+ int ret;
+
# ifdef SYS_renameat2
- return (int) syscall (SYS_renameat2, olddirfd, oldpath, newdirfd, newpath, flags);
-# else
+# ifndef RENAME_NOREPLACE
+# define RENAME_NOREPLACE (1 << 0)
+# endif
+
+ ret = (int) syscall (SYS_renameat2, olddirfd, oldpath, newdirfd, newpath, RENAME_NOREPLACE);
+ if (ret == 0 || errno != EINVAL)
+ return ret;
+
+ /* Fallback in case of errno==EINVAL. */
+# endif
+
/* This might be an issue if another process is trying to read the file while it is empty. */
- int fd = open (newpath, O_EXCL|O_CREAT, 0700);
- if (fd < 0)
- return fd;
- close (fd);
+ ret = open (newpath, O_EXCL|O_CREAT, 0700);
+ if (ret < 0)
+ return ret;
+ close (ret);
+
/* We are sure we created the file, let's overwrite it. */
return rename (oldpath, newpath);
-# endif
}
-#endif
#ifndef TEMP_FAILURE_RETRY
#define TEMP_FAILURE_RETRY(expression) \
@@ -453,7 +460,7 @@ create_pause_process (const char *pause_pid_file_path, char **argv)
/* There can be another process at this point trying to configure the user namespace and the pause
process, do not override the pid file if it already exists. */
- if (renameat2 (AT_FDCWD, tmp_file_path, AT_FDCWD, pause_pid_file_path, RENAME_NOREPLACE) < 0)
+ if (rename_noreplace (AT_FDCWD, tmp_file_path, AT_FDCWD, pause_pid_file_path) < 0)
{
unlink (tmp_file_path);
kill (pid, SIGKILL);
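For comparison, a rough Go analogue of the new C helper, assuming golang.org/x/sys/unix (which exposes Renameat2 and RENAME_NOREPLACE on Linux); the fallback carries the same caveat as the C version about readers briefly seeing an empty file:

//go:build linux

package main

import (
	"os"

	"golang.org/x/sys/unix"
)

// renameNoReplace renames oldpath to newpath, failing if newpath exists.
func renameNoReplace(oldpath, newpath string) error {
	err := unix.Renameat2(unix.AT_FDCWD, oldpath, unix.AT_FDCWD, newpath, unix.RENAME_NOREPLACE)
	if err == nil || err != unix.EINVAL {
		return err
	}
	// Fallback for kernels without renameat2: create exclusively, then rename.
	f, err := os.OpenFile(newpath, os.O_CREATE|os.O_EXCL, 0700)
	if err != nil {
		return err
	}
	f.Close()
	return os.Rename(oldpath, newpath)
}

func main() {
	if err := renameNoReplace("/tmp/pause.pid.tmp", "/tmp/pause.pid"); err != nil {
		panic(err)
	}
}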
diff --git a/pkg/timetype/timestamp.go b/pkg/timetype/timestamp.go
new file mode 100644
index 000000000..eb904a574
--- /dev/null
+++ b/pkg/timetype/timestamp.go
@@ -0,0 +1,131 @@
+package timetype
+
+// code adapted from https://github.com/moby/moby/blob/master/api/types/time/timestamp.go
+
+import (
+ "fmt"
+ "math"
+ "strconv"
+ "strings"
+ "time"
+)
+
+// These are additional predefined layouts for use in Time.Format and Time.Parse
+// with --since and --until parameters for `docker logs` and `docker events`
+const (
+ rFC3339Local = "2006-01-02T15:04:05" // RFC3339 with local timezone
+ rFC3339NanoLocal = "2006-01-02T15:04:05.999999999" // RFC3339Nano with local timezone
+ dateWithZone = "2006-01-02Z07:00" // RFC3339 with time at 00:00:00
+ dateLocal = "2006-01-02" // RFC3339 with local timezone and time at 00:00:00
+)
+
+// GetTimestamp tries to parse the given string as a golang duration,
+// then as an RFC3339 time, and finally as a Unix timestamp. If
+// any of these succeed, it returns a Unix timestamp
+// as a string; otherwise it returns the given value back.
+// In case of duration input, the returned timestamp is computed
+// as the given reference time minus the amount of the duration.
+func GetTimestamp(value string, reference time.Time) (string, error) {
+ if d, err := time.ParseDuration(value); value != "0" && err == nil {
+ return strconv.FormatInt(reference.Add(-d).Unix(), 10), nil
+ }
+
+ var format string
+ // if the string has a Z, a +, or three dashes, use time.Parse; otherwise use time.ParseInLocation
+ parseInLocation := !(strings.ContainsAny(value, "zZ+") || strings.Count(value, "-") == 3)
+
+ if strings.Contains(value, ".") {
+ if parseInLocation {
+ format = rFC3339NanoLocal
+ } else {
+ format = time.RFC3339Nano
+ }
+ } else if strings.Contains(value, "T") {
+ // we want the number of colons in the T portion of the timestamp
+ tcolons := strings.Count(value, ":")
+ // if parseInLocation is off and we have a +/- zone offset (not Z), then
+ // there will be an extra colon in the input for the tz offset; subtract that
+ // colon from the tcolons count
+ if !parseInLocation && !strings.ContainsAny(value, "zZ") && tcolons > 0 {
+ tcolons--
+ }
+ if parseInLocation {
+ switch tcolons {
+ case 0:
+ format = "2006-01-02T15"
+ case 1:
+ format = "2006-01-02T15:04"
+ default:
+ format = rFC3339Local
+ }
+ } else {
+ switch tcolons {
+ case 0:
+ format = "2006-01-02T15Z07:00"
+ case 1:
+ format = "2006-01-02T15:04Z07:00"
+ default:
+ format = time.RFC3339
+ }
+ }
+ } else if parseInLocation {
+ format = dateLocal
+ } else {
+ format = dateWithZone
+ }
+
+ var t time.Time
+ var err error
+
+ if parseInLocation {
+ t, err = time.ParseInLocation(format, value, time.FixedZone(reference.Zone()))
+ } else {
+ t, err = time.Parse(format, value)
+ }
+
+ if err != nil {
+ // if there is a `-` then it's an RFC3339 like timestamp
+ if strings.Contains(value, "-") {
+ return "", err // was probably an RFC3339 like timestamp but the parser failed with an error
+ }
+ if _, _, err := parseTimestamp(value); err != nil {
+ return "", fmt.Errorf("failed to parse value as time or duration: %q", value)
+ }
+ return value, nil // unix timestamp in and out case (meaning: the value passed at the command line is already in the right format for passing to the server)
+ }
+
+ return fmt.Sprintf("%d.%09d", t.Unix(), int64(t.Nanosecond())), nil
+}
+
+// ParseTimestamps returns seconds and nanoseconds from a timestamp in the
+// format produced by fmt.Sprintf("%d.%09d", t.Unix(), int64(t.Nanosecond())).
+// If the incoming nanosecond portion is longer or shorter than 9 digits, it is
+// converted to nanoseconds. The expectation is that the seconds and
+// nanoseconds will be used to create a time variable. For example:
+//   seconds, nanoseconds, err := ParseTimestamps("1136073600.000000001", 0)
+//   if err == nil { since := time.Unix(seconds, nanoseconds) }
+// It returns def (the default seconds) and zero nanoseconds if value == "".
+func ParseTimestamps(value string, def int64) (int64, int64, error) {
+ if value == "" {
+ return def, 0, nil
+ }
+ return parseTimestamp(value)
+}
+
+func parseTimestamp(value string) (int64, int64, error) {
+ sa := strings.SplitN(value, ".", 2)
+ s, err := strconv.ParseInt(sa[0], 10, 64)
+ if err != nil {
+ return s, 0, err
+ }
+ if len(sa) != 2 {
+ return s, 0, nil
+ }
+ n, err := strconv.ParseInt(sa[1], 10, 64)
+ if err != nil {
+ return s, n, err
+ }
+ // should already be in nanoseconds but just in case convert n to nanoseconds
+ n = int64(float64(n) * math.Pow(float64(10), float64(9-len(sa[1]))))
+ return s, n, nil
+}
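Taken together, a short usage sketch of the new package; the expected values come from the test table below, and the import path matches where the file is added:

package main

import (
	"fmt"
	"time"

	"github.com/containers/libpod/pkg/timetype"
)

func main() {
	now := time.Now()

	// RFC3339 input becomes "seconds.nanoseconds"
	ts, err := timetype.GetTimestamp("2006-01-02T15:04:05Z", now)
	fmt.Println(ts, err) // 1136214245.000000000 <nil>

	// duration input is interpreted relative to the reference time
	since, _ := timetype.GetTimestamp("1h30m", now)
	fmt.Println(since) // now minus 90 minutes, as Unix seconds

	// and back to (seconds, nanoseconds)
	s, n, err := timetype.ParseTimestamps("1136073600.000000001", 0)
	fmt.Println(s, n, err) // 1136073600 1 <nil>
}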
diff --git a/pkg/timetype/timestamp_test.go b/pkg/timetype/timestamp_test.go
new file mode 100644
index 000000000..0fffb85a9
--- /dev/null
+++ b/pkg/timetype/timestamp_test.go
@@ -0,0 +1,95 @@
+package timetype
+
+// code adapted from https://github.com/moby/moby/blob/master/api/types/time/timestamp.go
+
+import (
+ "fmt"
+ "testing"
+ "time"
+)
+
+func TestGetTimestamp(t *testing.T) {
+ now := time.Now().In(time.UTC)
+ cases := []struct {
+ in, expected string
+ expectedErr bool
+ }{
+ // Partial RFC3339 strings get parsed with second precision
+ {"2006-01-02T15:04:05.999999999+07:00", "1136189045.999999999", false},
+ {"2006-01-02T15:04:05.999999999Z", "1136214245.999999999", false},
+ {"2006-01-02T15:04:05.999999999", "1136214245.999999999", false},
+ {"2006-01-02T15:04:05Z", "1136214245.000000000", false},
+ {"2006-01-02T15:04:05", "1136214245.000000000", false},
+ {"2006-01-02T15:04:0Z", "", true},
+ {"2006-01-02T15:04:0", "", true},
+ {"2006-01-02T15:04Z", "1136214240.000000000", false},
+ {"2006-01-02T15:04+00:00", "1136214240.000000000", false},
+ {"2006-01-02T15:04-00:00", "1136214240.000000000", false},
+ {"2006-01-02T15:04", "1136214240.000000000", false},
+ {"2006-01-02T15:0Z", "", true},
+ {"2006-01-02T15:0", "", true},
+ {"2006-01-02T15Z", "1136214000.000000000", false},
+ {"2006-01-02T15+00:00", "1136214000.000000000", false},
+ {"2006-01-02T15-00:00", "1136214000.000000000", false},
+ {"2006-01-02T15", "1136214000.000000000", false},
+ {"2006-01-02T1Z", "1136163600.000000000", false},
+ {"2006-01-02T1", "1136163600.000000000", false},
+ {"2006-01-02TZ", "", true},
+ {"2006-01-02T", "", true},
+ {"2006-01-02+00:00", "1136160000.000000000", false},
+ {"2006-01-02-00:00", "1136160000.000000000", false},
+ {"2006-01-02-00:01", "1136160060.000000000", false},
+ {"2006-01-02Z", "1136160000.000000000", false},
+ {"2006-01-02", "1136160000.000000000", false},
+ {"2015-05-13T20:39:09Z", "1431549549.000000000", false},
+
+ // unix timestamps returned as is
+ {"1136073600", "1136073600", false},
+ {"1136073600.000000001", "1136073600.000000001", false},
+ // Durations
+ {"1m", fmt.Sprintf("%d", now.Add(-1*time.Minute).Unix()), false},
+ {"1.5h", fmt.Sprintf("%d", now.Add(-90*time.Minute).Unix()), false},
+ {"1h30m", fmt.Sprintf("%d", now.Add(-90*time.Minute).Unix()), false},
+
+ {"invalid", "", true},
+ {"", "", true},
+ }
+
+ for _, c := range cases {
+ o, err := GetTimestamp(c.in, now)
+ if o != c.expected ||
+ (err == nil && c.expectedErr) ||
+ (err != nil && !c.expectedErr) {
+ t.Errorf("wrong value for '%s'. expected:'%s' got:'%s' with error: `%s`", c.in, c.expected, o, err)
+ t.Fail()
+ }
+ }
+}
+
+func TestParseTimestamps(t *testing.T) {
+ cases := []struct {
+ in string
+ def, expectedS, expectedN int64
+ expectedErr bool
+ }{
+ // unix timestamps
+ {"1136073600", 0, 1136073600, 0, false},
+ {"1136073600.000000001", 0, 1136073600, 1, false},
+ {"1136073600.0000000010", 0, 1136073600, 1, false},
+ {"1136073600.00000001", 0, 1136073600, 10, false},
+ {"foo.bar", 0, 0, 0, true},
+ {"1136073600.bar", 0, 1136073600, 0, true},
+ {"", -1, -1, 0, false},
+ }
+
+ for _, c := range cases {
+ s, n, err := ParseTimestamps(c.in, c.def)
+ if s != c.expectedS ||
+ n != c.expectedN ||
+ (err == nil && c.expectedErr) ||
+ (err != nil && !c.expectedErr) {
+ t.Errorf("wrong values for input `%s` with default `%d` expected:'%d'seconds and `%d`nanosecond got:'%d'seconds and `%d`nanoseconds with error: `%s`", c.in, c.def, c.expectedS, c.expectedN, s, n, err)
+ t.Fail()
+ }
+ }
+}
diff --git a/pkg/util/utils.go b/pkg/util/utils.go
index 6906b26d5..5b4dfe9fa 100644
--- a/pkg/util/utils.go
+++ b/pkg/util/utils.go
@@ -1,11 +1,12 @@
package util
import (
+ "encoding/json"
"fmt"
"os"
"os/user"
"path/filepath"
- "regexp"
+ "strconv"
"strings"
"sync"
"time"
@@ -18,6 +19,7 @@ import (
"github.com/containers/libpod/pkg/rootless"
"github.com/containers/storage"
"github.com/containers/storage/pkg/idtools"
+ "github.com/docker/docker/pkg/signal"
v1 "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
@@ -71,118 +73,236 @@ func StringInSlice(s string, sl []string) bool {
return false
}
-// ParseChanges returns key, value(s) pair for given option.
-func ParseChanges(option string) (key string, vals []string, err error) {
- // Supported format as below
- // 1. key=value
- // 2. key value
- // 3. key ["value","value1"]
- if strings.Contains(option, " ") {
- // This handles 2 & 3 conditions.
- var val string
- tokens := strings.SplitAfterN(option, " ", 2)
- if len(tokens) < 2 {
- return "", []string{}, fmt.Errorf("invalid key value %s", option)
- }
- key = strings.Trim(tokens[0], " ") // Need to trim whitespace part of delimiter.
- val = tokens[1]
- if strings.Contains(tokens[1], "[") && strings.Contains(tokens[1], "]") {
- //Trim '[',']' if exist.
- val = strings.TrimLeft(strings.TrimRight(tokens[1], "]"), "[")
- }
- vals = strings.Split(val, ",")
- } else if strings.Contains(option, "=") {
- // handles condition 1.
- tokens := strings.Split(option, "=")
- key = tokens[0]
- vals = tokens[1:]
- } else {
- // either ` ` or `=` must be provided after command
- return "", []string{}, fmt.Errorf("invalid format %s", option)
- }
-
- if len(vals) == 0 {
- return "", []string{}, errors.Errorf("no value given for instruction %q", key)
- }
-
- for _, v := range vals {
- //each option must not have ' '., `[`` or `]` & empty strings
- whitespaces := regexp.MustCompile(`[\[\s\]]`)
- if whitespaces.MatchString(v) || len(v) == 0 {
- return "", []string{}, fmt.Errorf("invalid value %s", v)
- }
- }
- return key, vals, nil
+// ImageConfig is a wrapper around the OCIv1 Image Configuration struct exported
+// by the OCI image-spec, but containing additional fields that are not supported
+// by OCIv1 (but are by Docker v2) - notably OnBuild.
+type ImageConfig struct {
+ v1.ImageConfig
+ OnBuild []string
}
-// GetImageConfig converts the --change flag values in the format "CMD=/bin/bash USER=example"
-// to a type v1.ImageConfig
-func GetImageConfig(changes []string) (v1.ImageConfig, error) {
- // USER=value | EXPOSE=value | ENV=value | ENTRYPOINT=value |
- // CMD=value | VOLUME=value | WORKDIR=value | LABEL=key=value | STOPSIGNAL=value
-
- var (
- user string
- env []string
- entrypoint []string
- cmd []string
- workingDir string
- stopSignal string
- )
-
- exposedPorts := make(map[string]struct{})
- volumes := make(map[string]struct{})
- labels := make(map[string]string)
- for _, ch := range changes {
- key, vals, err := ParseChanges(ch)
- if err != nil {
- return v1.ImageConfig{}, err
+// GetImageConfig produces an ImageConfig from the --change flag values
+// accepted by several Podman commands. It accepts a (limited) subset of
+// Dockerfile instructions.
+func GetImageConfig(changes []string) (ImageConfig, error) {
+ // Valid changes:
+ // USER
+ // EXPOSE
+ // ENV
+ // ENTRYPOINT
+ // CMD
+ // VOLUME
+ // WORKDIR
+ // LABEL
+ // STOPSIGNAL
+ // ONBUILD
+
+ config := ImageConfig{}
+
+ for _, change := range changes {
+ // First, let's assume proper Dockerfile format - space
+ // separator between instruction and value
+ split := strings.SplitN(change, " ", 2)
+
+ if len(split) != 2 {
+ split = strings.SplitN(change, "=", 2)
+ if len(split) != 2 {
+ return ImageConfig{}, errors.Errorf("invalid change %q - must be formatted as KEY VALUE", change)
+ }
}
- switch key {
+ outerKey := strings.ToUpper(strings.TrimSpace(split[0]))
+ value := strings.TrimSpace(split[1])
+ switch outerKey {
case "USER":
- user = vals[0]
+ // Assume literal contents are the user.
+ if value == "" {
+ return ImageConfig{}, errors.Errorf("invalid change %q - must provide a value to USER", change)
+ }
+ config.User = value
case "EXPOSE":
- var st struct{}
- exposedPorts[vals[0]] = st
+ // EXPOSE is either [portnum] or
+ // [portnum]/[proto]
+ // Protocol must be "tcp" or "udp"
+ splitPort := strings.Split(value, "/")
+ if len(splitPort) > 2 {
+ return ImageConfig{}, errors.Errorf("invalid change %q - EXPOSE port must be formatted as PORT[/PROTO]", change)
+ }
+ portNum, err := strconv.Atoi(splitPort[0])
+ if err != nil {
+ return ImageConfig{}, errors.Wrapf(err, "invalid change %q - EXPOSE port must be an integer", change)
+ }
+ if portNum > 65535 || portNum <= 0 {
+ return ImageConfig{}, errors.Errorf("invalid change %q - EXPOSE port must be a valid port number", change)
+ }
+ proto := "tcp"
+ if len(splitPort) > 1 {
+ testProto := strings.ToLower(splitPort[1])
+ switch testProto {
+ case "tcp", "udp":
+ proto = testProto
+ default:
+ return ImageConfig{}, errors.Errorf("invalid change %q - EXPOSE protocol must be TCP or UDP", change)
+ }
+ }
+ if config.ExposedPorts == nil {
+ config.ExposedPorts = make(map[string]struct{})
+ }
+ config.ExposedPorts[fmt.Sprintf("%d/%s", portNum, proto)] = struct{}{}
case "ENV":
- if len(vals) < 2 {
- return v1.ImageConfig{}, errors.Errorf("no value given for environment variable %q", vals[0])
+ // Format is either:
+ // ENV key=value
+ // ENV key=value key=value ...
+ // ENV key value
+ // Both keys and values can be surrounded by quotes to group them.
+ // For now: we only support key=value
+ // We will attempt to strip quotation marks if present.
+
+ var (
+ key, val string
+ )
+
+ splitEnv := strings.SplitN(value, "=", 2)
+ key = splitEnv[0]
+ // We do need a key
+ if key == "" {
+ return ImageConfig{}, errors.Errorf("invalid change %q - ENV must have at least one argument", change)
+ }
+ // Perfectly valid to not have a value
+ if len(splitEnv) == 2 {
+ val = splitEnv[1]
+ }
+
+ if strings.HasPrefix(key, `"`) && strings.HasSuffix(key, `"`) {
+ key = strings.TrimPrefix(strings.TrimSuffix(key, `"`), `"`)
+ }
+ if strings.HasPrefix(val, `"`) && strings.HasSuffix(val, `"`) {
+ val = strings.TrimPrefix(strings.TrimSuffix(val, `"`), `"`)
}
- env = append(env, strings.Join(vals[0:], "="))
+ config.Env = append(config.Env, fmt.Sprintf("%s=%s", key, val))
case "ENTRYPOINT":
- // ENTRYPOINT and CMD can have array of strings
- entrypoint = append(entrypoint, vals...)
+ // Two valid forms.
+ // First, JSON array.
+ // Second, not a JSON array - we interpret this as an
+ // argument to `sh -c`, unless empty, in which case we
+ // just use a blank entrypoint.
+ testUnmarshal := []string{}
+ if err := json.Unmarshal([]byte(value), &testUnmarshal); err != nil {
+ // It ain't valid JSON, so assume it's an
+ // argument to sh -c if not empty.
+ if value != "" {
+ config.Entrypoint = []string{"/bin/sh", "-c", value}
+ } else {
+ config.Entrypoint = []string{}
+ }
+ } else {
+ // Valid JSON
+ config.Entrypoint = testUnmarshal
+ }
case "CMD":
- // ENTRYPOINT and CMD can have array of strings
- cmd = append(cmd, vals...)
+ // Same valid forms as entrypoint.
+ // However, where ENTRYPOINT assumes that 'ENTRYPOINT '
+ // means no entrypoint, CMD assumes it is 'sh -c' with
+ // no third argument.
+ testUnmarshal := []string{}
+ if err := json.Unmarshal([]byte(value), &testUnmarshal); err != nil {
+ // It ain't valid JSON, so assume it's an
+ // argument to sh -c.
+ // Only include the value if it's not ""
+ config.Cmd = []string{"/bin/sh", "-c"}
+ if value != "" {
+ config.Cmd = append(config.Cmd, value)
+ }
+ } else {
+ // Valid JSON
+ config.Cmd = testUnmarshal
+ }
case "VOLUME":
- var st struct{}
- volumes[vals[0]] = st
+ // Either a JSON array or a set of space-separated
+ // paths.
+ // Acts rather similar to ENTRYPOINT and CMD, but always
+ // appends rather than replacing, and no sh -c prepend.
+ testUnmarshal := []string{}
+ if err := json.Unmarshal([]byte(value), &testUnmarshal); err != nil {
+ // Not valid JSON, so split on spaces
+ testUnmarshal = strings.Split(value, " ")
+ }
+ if len(testUnmarshal) == 0 {
+ return ImageConfig{}, errors.Errorf("invalid change %q - must provide at least one argument to VOLUME", change)
+ }
+ for _, vol := range testUnmarshal {
+ if vol == "" {
+ return ImageConfig{}, errors.Errorf("invalid change %q - VOLUME paths must not be empty", change)
+ }
+ if config.Volumes == nil {
+ config.Volumes = make(map[string]struct{})
+ }
+ config.Volumes[vol] = struct{}{}
+ }
case "WORKDIR":
- workingDir = vals[0]
+ // This can be passed multiple times.
+ // Each successive invocation is treated as relative to
+ // the previous one - so WORKDIR /A, WORKDIR b,
+ // WORKDIR c results in /A/b/c
+ // Just need to check it's not empty...
+ if value == "" {
+ return ImageConfig{}, errors.Errorf("invalid change %q - must provide a non-empty WORKDIR", change)
+ }
+ config.WorkingDir = filepath.Join(config.WorkingDir, value)
case "LABEL":
- if len(vals) == 2 {
- labels[vals[0]] = vals[1]
- } else {
- labels[vals[0]] = ""
+ // Same general idea as ENV, but we no longer allow " "
+ // as a separator.
+ // We didn't do that for ENV either, so nice and easy.
+ // Potentially problematic: LABEL might theoretically
+ // allow an = in the key? If people really do this, we
+ // may need to investigate more advanced parsing.
+ var (
+ key, val string
+ )
+
+ splitLabel := strings.SplitN(value, "=", 2)
+ // Unlike ENV, LABEL must have a value
+ if len(splitLabel) != 2 {
+ return ImageConfig{}, errors.Errorf("invalid change %q - LABEL must be formatted key=value", change)
+ }
+ key = splitLabel[0]
+ val = splitLabel[1]
+
+ if strings.HasPrefix(key, `"`) && strings.HasSuffix(key, `"`) {
+ key = strings.TrimPrefix(strings.TrimSuffix(key, `"`), `"`)
+ }
+ if strings.HasPrefix(val, `"`) && strings.HasSuffix(val, `"`) {
+ val = strings.TrimPrefix(strings.TrimSuffix(val, `"`), `"`)
+ }
+ // Check key after we strip quotations
+ if key == "" {
+ return ImageConfig{}, errors.Errorf("invalid change %q - LABEL must have a non-empty key", change)
}
+ if config.Labels == nil {
+ config.Labels = make(map[string]string)
+ }
+ config.Labels[key] = val
case "STOPSIGNAL":
- stopSignal = vals[0]
+ // Check the provided signal for validity.
+ // TODO: Worth checking range? ParseSignal allows
+ // negative numbers.
+ killSignal, err := signal.ParseSignal(value)
+ if err != nil {
+ return ImageConfig{}, errors.Wrapf(err, "invalid change %q - STOPSIGNAL must be given a valid signal", change)
+ }
+ config.StopSignal = fmt.Sprintf("%d", killSignal)
+ case "ONBUILD":
+ // ONBUILD always appends.
+ if value == "" {
+ return ImageConfig{}, errors.Errorf("invalid change %q - ONBUILD must be given an argument", change)
+ }
+ config.OnBuild = append(config.OnBuild, value)
+ default:
+ return ImageConfig{}, errors.Errorf("invalid change %q - invalid instruction %s", change, outerKey)
}
}
- return v1.ImageConfig{
- User: user,
- ExposedPorts: exposedPorts,
- Env: env,
- Entrypoint: entrypoint,
- Cmd: cmd,
- Volumes: volumes,
- WorkingDir: workingDir,
- Labels: labels,
- StopSignal: stopSignal,
- }, nil
+ return config, nil
}
// ParseIDMapping takes idmappings and subuid and subgid maps and returns a storage mapping
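A usage sketch of the reworked parser; the inputs and expected outputs mirror the new unit tests below:

package main

import (
	"fmt"

	"github.com/containers/libpod/pkg/util"
)

func main() {
	config, err := util.GetImageConfig([]string{
		"USER webapp",
		"EXPOSE 8080/tcp",
		"ENV key=value",
		`ENTRYPOINT ["ls", "-al"]`,
		"WORKDIR /srv",
		"ONBUILD ADD /testdir1",
	})
	if err != nil {
		panic(err)
	}
	fmt.Println(config.User)         // webapp
	fmt.Println(config.ExposedPorts) // map[8080/tcp:{}]
	fmt.Println(config.Entrypoint)   // [ls -al]
	fmt.Println(config.OnBuild)      // [ADD /testdir1]
}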
diff --git a/pkg/util/utils_supported.go b/pkg/util/utils_supported.go
index 253460686..0b78a8150 100644
--- a/pkg/util/utils_supported.go
+++ b/pkg/util/utils_supported.go
@@ -20,6 +20,10 @@ import (
func GetRuntimeDir() (string, error) {
var rootlessRuntimeDirError error
+ if !rootless.IsRootless() {
+ return "", nil
+ }
+
rootlessRuntimeDirOnce.Do(func() {
runtimeDir := os.Getenv("XDG_RUNTIME_DIR")
uid := fmt.Sprintf("%d", rootless.GetRootlessUID())
diff --git a/pkg/util/utils_test.go b/pkg/util/utils_test.go
index c938dc592..f4b03599d 100644
--- a/pkg/util/utils_test.go
+++ b/pkg/util/utils_test.go
@@ -4,6 +4,7 @@ import (
"testing"
"github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
)
var (
@@ -19,70 +20,247 @@ func TestStringInSlice(t *testing.T) {
assert.False(t, StringInSlice("one", []string{}))
}
-func TestParseChanges(t *testing.T) {
- // CMD=/bin/sh
- _, vals, err := ParseChanges("CMD=/bin/sh")
- assert.EqualValues(t, []string{"/bin/sh"}, vals)
- assert.NoError(t, err)
-
- // CMD [/bin/sh]
- _, vals, err = ParseChanges("CMD [/bin/sh]")
- assert.EqualValues(t, []string{"/bin/sh"}, vals)
- assert.NoError(t, err)
-
- // CMD ["/bin/sh"]
- _, vals, err = ParseChanges(`CMD ["/bin/sh"]`)
- assert.EqualValues(t, []string{`"/bin/sh"`}, vals)
- assert.NoError(t, err)
-
- // CMD ["/bin/sh","-c","ls"]
- _, vals, err = ParseChanges(`CMD ["/bin/sh","c","ls"]`)
- assert.EqualValues(t, []string{`"/bin/sh"`, `"c"`, `"ls"`}, vals)
- assert.NoError(t, err)
-
- // CMD ["/bin/sh","arg-with,comma"]
- _, vals, err = ParseChanges(`CMD ["/bin/sh","arg-with,comma"]`)
- assert.EqualValues(t, []string{`"/bin/sh"`, `"arg-with`, `comma"`}, vals)
- assert.NoError(t, err)
-
- // CMD "/bin/sh"]
- _, _, err = ParseChanges(`CMD "/bin/sh"]`)
- assert.Error(t, err)
- assert.Equal(t, `invalid value "/bin/sh"]`, err.Error())
-
- // CMD [bin/sh
- _, _, err = ParseChanges(`CMD "/bin/sh"]`)
- assert.Error(t, err)
- assert.Equal(t, `invalid value "/bin/sh"]`, err.Error())
-
- // CMD ["/bin /sh"]
- _, _, err = ParseChanges(`CMD ["/bin /sh"]`)
- assert.Error(t, err)
- assert.Equal(t, `invalid value "/bin /sh"`, err.Error())
-
- // CMD ["/bin/sh", "-c","ls"] whitespace between values
- _, vals, err = ParseChanges(`CMD ["/bin/sh", "c","ls"]`)
- assert.Error(t, err)
- assert.Equal(t, `invalid value "c"`, err.Error())
-
- // CMD?
- _, _, err = ParseChanges(`CMD?`)
- assert.Error(t, err)
- assert.Equal(t, `invalid format CMD?`, err.Error())
-
- // empty values for CMD
- _, _, err = ParseChanges(`CMD `)
- assert.Error(t, err)
- assert.Equal(t, `invalid value `, err.Error())
-
- // LABEL=blue=image
- _, vals, err = ParseChanges(`LABEL=blue=image`)
- assert.EqualValues(t, []string{"blue", "image"}, vals)
- assert.NoError(t, err)
-
- // LABEL = blue=image
- _, vals, err = ParseChanges(`LABEL = blue=image`)
- assert.Error(t, err)
- assert.Equal(t, `invalid value = blue=image`, err.Error())
+func TestGetImageConfigUser(t *testing.T) {
+ validUser, err := GetImageConfig([]string{"USER valid"})
+ require.Nil(t, err)
+ assert.Equal(t, validUser.User, "valid")
+ validUser2, err := GetImageConfig([]string{"USER test_user_2"})
+ require.Nil(t, err)
+ assert.Equal(t, validUser2.User, "test_user_2")
+
+ _, err = GetImageConfig([]string{"USER "})
+ assert.NotNil(t, err)
+}
+
+func TestGetImageConfigExpose(t *testing.T) {
+ validPortNoProto, err := GetImageConfig([]string{"EXPOSE 80"})
+ require.Nil(t, err)
+ _, exists := validPortNoProto.ExposedPorts["80/tcp"]
+ assert.True(t, exists)
+
+ validPortTCP, err := GetImageConfig([]string{"EXPOSE 80/tcp"})
+ require.Nil(t, err)
+ _, exists = validPortTCP.ExposedPorts["80/tcp"]
+ assert.True(t, exists)
+
+ validPortUDP, err := GetImageConfig([]string{"EXPOSE 80/udp"})
+ require.Nil(t, err)
+ _, exists = validPortUDP.ExposedPorts["80/udp"]
+ assert.True(t, exists)
+
+ _, err = GetImageConfig([]string{"EXPOSE 99999"})
+ assert.NotNil(t, err)
+
+ _, err = GetImageConfig([]string{"EXPOSE 80/notaproto"})
+ assert.NotNil(t, err)
+
+ _, err = GetImageConfig([]string{"EXPOSE "})
+ assert.NotNil(t, err)
+
+ _, err = GetImageConfig([]string{"EXPOSE thisisnotanumber"})
+ assert.NotNil(t, err)
+}
+
+func TestGetImageConfigEnv(t *testing.T) {
+ validEnvNoValue, err := GetImageConfig([]string{"ENV key"})
+ require.Nil(t, err)
+ assert.True(t, StringInSlice("key=", validEnvNoValue.Env))
+
+ validEnvBareEquals, err := GetImageConfig([]string{"ENV key="})
+ require.Nil(t, err)
+ assert.True(t, StringInSlice("key=", validEnvBareEquals.Env))
+
+ validEnvKeyValue, err := GetImageConfig([]string{"ENV key=value"})
+ require.Nil(t, err)
+ assert.True(t, StringInSlice("key=value", validEnvKeyValue.Env))
+
+ validEnvKeyMultiEntryValue, err := GetImageConfig([]string{`ENV key="value1 value2"`})
+ require.Nil(t, err)
+ assert.True(t, StringInSlice("key=value1 value2", validEnvKeyMultiEntryValue.Env))
+
+ _, err = GetImageConfig([]string{"ENV "})
+ assert.NotNil(t, err)
+}
+
+func TestGetImageConfigEntrypoint(t *testing.T) {
+ binShEntrypoint, err := GetImageConfig([]string{"ENTRYPOINT /bin/bash"})
+ require.Nil(t, err)
+ require.Equal(t, 3, len(binShEntrypoint.Entrypoint))
+ assert.Equal(t, binShEntrypoint.Entrypoint[0], "/bin/sh")
+ assert.Equal(t, binShEntrypoint.Entrypoint[1], "-c")
+ assert.Equal(t, binShEntrypoint.Entrypoint[2], "/bin/bash")
+
+ entrypointWithSpaces, err := GetImageConfig([]string{"ENTRYPOINT ls -al"})
+ require.Nil(t, err)
+ require.Equal(t, 3, len(entrypointWithSpaces.Entrypoint))
+ assert.Equal(t, entrypointWithSpaces.Entrypoint[0], "/bin/sh")
+ assert.Equal(t, entrypointWithSpaces.Entrypoint[1], "-c")
+ assert.Equal(t, entrypointWithSpaces.Entrypoint[2], "ls -al")
+
+ jsonArrayEntrypoint, err := GetImageConfig([]string{`ENTRYPOINT ["ls", "-al"]`})
+ require.Nil(t, err)
+ require.Equal(t, 2, len(jsonArrayEntrypoint.Entrypoint))
+ assert.Equal(t, jsonArrayEntrypoint.Entrypoint[0], "ls")
+ assert.Equal(t, jsonArrayEntrypoint.Entrypoint[1], "-al")
+
+ emptyEntrypoint, err := GetImageConfig([]string{"ENTRYPOINT "})
+ require.Nil(t, err)
+ assert.Equal(t, 0, len(emptyEntrypoint.Entrypoint))
+
+ emptyEntrypointArray, err := GetImageConfig([]string{"ENTRYPOINT []"})
+ require.Nil(t, err)
+ assert.Equal(t, 0, len(emptyEntrypointArray.Entrypoint))
+}
+
+func TestGetImageConfigCmd(t *testing.T) {
+ binShCmd, err := GetImageConfig([]string{"CMD /bin/bash"})
+ require.Nil(t, err)
+ require.Equal(t, 3, len(binShCmd.Cmd))
+ assert.Equal(t, binShCmd.Cmd[0], "/bin/sh")
+ assert.Equal(t, binShCmd.Cmd[1], "-c")
+ assert.Equal(t, binShCmd.Cmd[2], "/bin/bash")
+
+ cmdWithSpaces, err := GetImageConfig([]string{"CMD ls -al"})
+ require.Nil(t, err)
+ require.Equal(t, 3, len(cmdWithSpaces.Cmd))
+ assert.Equal(t, cmdWithSpaces.Cmd[0], "/bin/sh")
+ assert.Equal(t, cmdWithSpaces.Cmd[1], "-c")
+ assert.Equal(t, cmdWithSpaces.Cmd[2], "ls -al")
+
+ jsonArrayCmd, err := GetImageConfig([]string{`CMD ["ls", "-al"]`})
+ require.Nil(t, err)
+ require.Equal(t, 2, len(jsonArrayCmd.Cmd))
+ assert.Equal(t, jsonArrayCmd.Cmd[0], "ls")
+ assert.Equal(t, jsonArrayCmd.Cmd[1], "-al")
+
+ emptyCmd, err := GetImageConfig([]string{"CMD "})
+ require.Nil(t, err)
+ require.Equal(t, 2, len(emptyCmd.Cmd))
+ assert.Equal(t, emptyCmd.Cmd[0], "/bin/sh")
+ assert.Equal(t, emptyCmd.Cmd[1], "-c")
+
+ blankCmd, err := GetImageConfig([]string{"CMD []"})
+ require.Nil(t, err)
+ assert.Equal(t, 0, len(blankCmd.Cmd))
+}
+
+func TestGetImageConfigVolume(t *testing.T) {
+ oneLenJSONArrayVol, err := GetImageConfig([]string{`VOLUME ["/test1"]`})
+ require.Nil(t, err)
+ _, exists := oneLenJSONArrayVol.Volumes["/test1"]
+ assert.True(t, exists)
+ assert.Equal(t, 1, len(oneLenJSONArrayVol.Volumes))
+
+ twoLenJSONArrayVol, err := GetImageConfig([]string{`VOLUME ["/test1", "/test2"]`})
+ require.Nil(t, err)
+ assert.Equal(t, 2, len(twoLenJSONArrayVol.Volumes))
+ _, exists = twoLenJSONArrayVol.Volumes["/test1"]
+ assert.True(t, exists)
+ _, exists = twoLenJSONArrayVol.Volumes["/test2"]
+ assert.True(t, exists)
+
+ oneLenVol, err := GetImageConfig([]string{"VOLUME /test1"})
+ require.Nil(t, err)
+ _, exists = oneLenVol.Volumes["/test1"]
+ assert.True(t, exists)
+ assert.Equal(t, 1, len(oneLenVol.Volumes))
+
+ twoLenVol, err := GetImageConfig([]string{"VOLUME /test1 /test2"})
+ require.Nil(t, err)
+ assert.Equal(t, 2, len(twoLenVol.Volumes))
+ _, exists = twoLenVol.Volumes["/test1"]
+ assert.True(t, exists)
+ _, exists = twoLenVol.Volumes["/test2"]
+ assert.True(t, exists)
+
+ _, err = GetImageConfig([]string{"VOLUME []"})
+ assert.NotNil(t, err)
+
+ _, err = GetImageConfig([]string{"VOLUME "})
+ assert.NotNil(t, err)
+
+ _, err = GetImageConfig([]string{`VOLUME [""]`})
+ assert.NotNil(t, err)
+}
+
+func TestGetImageConfigWorkdir(t *testing.T) {
+ singleWorkdir, err := GetImageConfig([]string{"WORKDIR /testdir"})
+ require.Nil(t, err)
+ assert.Equal(t, singleWorkdir.WorkingDir, "/testdir")
+
+ twoWorkdirs, err := GetImageConfig([]string{"WORKDIR /testdir", "WORKDIR a"})
+ require.Nil(t, err)
+ assert.Equal(t, twoWorkdirs.WorkingDir, "/testdir/a")
+
+ _, err = GetImageConfig([]string{"WORKDIR "})
+ assert.NotNil(t, err)
+}
+
+func TestGetImageConfigLabel(t *testing.T) {
+ labelNoQuotes, err := GetImageConfig([]string{"LABEL key1=value1"})
+ require.Nil(t, err)
+ assert.Equal(t, labelNoQuotes.Labels["key1"], "value1")
+
+ labelWithQuotes, err := GetImageConfig([]string{`LABEL "key 1"="value 2"`})
+ require.Nil(t, err)
+ assert.Equal(t, labelWithQuotes.Labels["key 1"], "value 2")
+
+ labelNoValue, err := GetImageConfig([]string{"LABEL key="})
+ require.Nil(t, err)
+ contents, exists := labelNoValue.Labels["key"]
+ assert.True(t, exists)
+ assert.Equal(t, contents, "")
+
+ _, err = GetImageConfig([]string{"LABEL key"})
+ assert.NotNil(t, err)
+
+ _, err = GetImageConfig([]string{"LABEL "})
+ assert.NotNil(t, err)
+}
+
+func TestGetImageConfigStopSignal(t *testing.T) {
+ stopSignalValidInt, err := GetImageConfig([]string{"STOPSIGNAL 9"})
+ require.Nil(t, err)
+ assert.Equal(t, stopSignalValidInt.StopSignal, "9")
+
+ stopSignalValidString, err := GetImageConfig([]string{"STOPSIGNAL SIGKILL"})
+ require.Nil(t, err)
+ assert.Equal(t, stopSignalValidString.StopSignal, "9")
+
+ _, err = GetImageConfig([]string{"STOPSIGNAL 0"})
+ assert.NotNil(t, err)
+
+ _, err = GetImageConfig([]string{"STOPSIGNAL garbage"})
+ assert.NotNil(t, err)
+
+ _, err = GetImageConfig([]string{"STOPSIGNAL "})
+ assert.NotNil(t, err)
+}
+
+func TestGetImageConfigOnBuild(t *testing.T) {
+ onBuildOne, err := GetImageConfig([]string{"ONBUILD ADD /testdir1"})
+ require.Nil(t, err)
+ require.Equal(t, 1, len(onBuildOne.OnBuild))
+ assert.Equal(t, onBuildOne.OnBuild[0], "ADD /testdir1")
+
+ onBuildTwo, err := GetImageConfig([]string{"ONBUILD ADD /testdir1", "ONBUILD ADD /testdir2"})
+ require.Nil(t, err)
+ require.Equal(t, 2, len(onBuildTwo.OnBuild))
+ assert.Equal(t, onBuildTwo.OnBuild[0], "ADD /testdir1")
+ assert.Equal(t, onBuildTwo.OnBuild[1], "ADD /testdir2")
+
+ _, err = GetImageConfig([]string{"ONBUILD "})
+ assert.NotNil(t, err)
+}
+
+func TestGetImageConfigMisc(t *testing.T) {
+ _, err := GetImageConfig([]string{""})
+ assert.NotNil(t, err)
+
+ _, err = GetImageConfig([]string{"USER"})
+ assert.NotNil(t, err)
+
+ _, err = GetImageConfig([]string{"BADINST testvalue"})
+ assert.NotNil(t, err)
}
diff --git a/pkg/varlinkapi/images.go b/pkg/varlinkapi/images.go
index 8d44e6373..604a455a5 100644
--- a/pkg/varlinkapi/images.go
+++ b/pkg/varlinkapi/images.go
@@ -21,7 +21,7 @@ import (
"github.com/containers/image/v5/transports/alltransports"
"github.com/containers/image/v5/types"
"github.com/containers/libpod/cmd/podman/shared"
- "github.com/containers/libpod/cmd/podman/varlink"
+ iopodman "github.com/containers/libpod/cmd/podman/varlink"
"github.com/containers/libpod/libpod"
"github.com/containers/libpod/libpod/define"
"github.com/containers/libpod/libpod/image"
@@ -29,7 +29,7 @@ import (
"github.com/containers/libpod/pkg/util"
"github.com/containers/libpod/utils"
"github.com/containers/storage/pkg/archive"
- "github.com/opencontainers/image-spec/specs-go/v1"
+ v1 "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/opencontainers/runtime-spec/specs-go"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
@@ -70,6 +70,7 @@ func (i *LibpodAPI) ListImages(call iopodman.VarlinkCall) error {
Labels: labels,
IsParent: isParent,
ReadOnly: image.IsReadOnly(),
+ History: image.NamesHistory(),
}
imageList = append(imageList, i)
}
@@ -111,6 +112,7 @@ func (i *LibpodAPI) GetImage(call iopodman.VarlinkCall, id string) error {
Labels: labels,
TopLayer: newImage.TopLayer(),
ReadOnly: newImage.IsReadOnly(),
+ History: newImage.NamesHistory(),
}
return call.ReplyGetImage(il)
}
@@ -600,7 +602,7 @@ func (i *LibpodAPI) ImportImage(call iopodman.VarlinkCall, source, reference, me
{Comment: message},
}
config := v1.Image{
- Config: configChanges,
+ Config: configChanges.ImageConfig,
History: history,
}
newImage, err := i.Runtime.ImageRuntime().Import(getContext(), source, reference, nil, image.SigningOptions{}, config)
@@ -740,8 +742,8 @@ func (i *LibpodAPI) ContainerRunlabel(call iopodman.VarlinkCall, input iopodman.
}
// ImagesPrune ....
-func (i *LibpodAPI) ImagesPrune(call iopodman.VarlinkCall, all bool) error {
- prunedImages, err := i.Runtime.ImageRuntime().PruneImages(context.TODO(), all)
+func (i *LibpodAPI) ImagesPrune(call iopodman.VarlinkCall, all bool, filter []string) error {
+ prunedImages, err := i.Runtime.ImageRuntime().PruneImages(context.TODO(), all, filter)
if err != nil {
return call.ReplyErrorOccurred(err.Error())
}
diff --git a/pkg/varlinkapi/pods.go b/pkg/varlinkapi/pods.go
index 9b659f66b..1ebe5d424 100644
--- a/pkg/varlinkapi/pods.go
+++ b/pkg/varlinkapi/pods.go
@@ -247,7 +247,7 @@ func (i *LibpodAPI) RemovePod(call iopodman.VarlinkCall, name string, force bool
if err != nil {
return call.ReplyPodNotFound(name, err.Error())
}
- if err = i.Runtime.RemovePod(ctx, pod, force, force); err != nil {
+ if err = i.Runtime.RemovePod(ctx, pod, true, force); err != nil {
return call.ReplyErrorOccurred(err.Error())
}
diff --git a/pkg/varlinkapi/system.go b/pkg/varlinkapi/system.go
index f6057f5fc..b81ff11ba 100644
--- a/pkg/varlinkapi/system.go
+++ b/pkg/varlinkapi/system.go
@@ -3,12 +3,15 @@
package varlinkapi
import (
+ "context"
"fmt"
- "github.com/containers/libpod/libpod/define"
+ "os"
goruntime "runtime"
"time"
"github.com/containers/libpod/cmd/podman/varlink"
+ "github.com/containers/libpod/libpod/define"
+ "github.com/sirupsen/logrus"
)
// GetVersion ...
@@ -105,3 +108,20 @@ func (i *LibpodAPI) GetInfo(call iopodman.VarlinkCall) error {
podmanInfo.Insecure_registries = insecureRegistries
return call.ReplyGetInfo(podmanInfo)
}
+
+// Reset ...
+func (i *LibpodAPI) Reset(call iopodman.VarlinkCall) error {
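+ // Reset removes the runtime's containers, pods, images, and storage,
+ // so the varlink service exits once the reply is sent rather than
+ // continue serving against state that no longer exists.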
+ if err := i.Runtime.Reset(context.TODO()); err != nil {
+ logrus.Errorf("Reset Failed: %v", err)
+ if err := call.ReplyErrorOccurred(err.Error()); err != nil {
+ logrus.Errorf("Failed to send ReplyErrorOccurred: %v", err)
+ }
+ os.Exit(define.ExecErrorCodeGeneric)
+ }
+ if err := call.ReplyReset(); err != nil {
+ logrus.Errorf("Failed to send ReplyReset: %v", err)
+ os.Exit(define.ExecErrorCodeGeneric)
+ }
+ os.Exit(0)
+ return nil
+}
diff --git a/rootless.md b/rootless.md
index 4fb3c7deb..d8997a261 100644
--- a/rootless.md
+++ b/rootless.md
@@ -42,3 +42,5 @@ can easily fail
* Pause and Unpause (Works with cgroup V2 support)
* Issues with higher UIDs can cause builds to fail
* If a build is attempting to use a UID that is not mapped into the user namespace mapping for a container, then builds will not be able to put the UID in an image.
+* Making device nodes within a container fails, even when running with `--privileged`.
+ * The kernel does not allow non-root processes (those lacking CAP_MKNOD) to create device nodes. If a container needs to create device nodes, it must be run as root.
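To make the failure mode above concrete, here is a minimal Go sketch (not part of this patch; it assumes the golang.org/x/sys/unix package, and the /tmp/mynull path and device numbers are arbitrary). Run as an unprivileged user, the mknod(2) call is expected to fail with EPERM:

```go
package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	// Try to create a char device node with /dev/null's numbers (1, 3).
	dev := unix.Mkdev(1, 3)
	if err := unix.Mknod("/tmp/mynull", unix.S_IFCHR|0666, int(dev)); err != nil {
		// Without CAP_MKNOD the kernel refuses with EPERM.
		fmt.Println("mknod failed:", err)
		return
	}
	fmt.Println("mknod succeeded (CAP_MKNOD is available)")
}
```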
diff --git a/test/e2e/build/basicalpine/Containerfile b/test/e2e/build/basicalpine/Containerfile
new file mode 100644
index 000000000..67fd37901
--- /dev/null
+++ b/test/e2e/build/basicalpine/Containerfile
@@ -0,0 +1 @@
+FROM alpine
diff --git a/test/e2e/build/context_dir_a_file b/test/e2e/build/context_dir_a_file
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/test/e2e/build/context_dir_a_file
diff --git a/test/e2e/build/squash/Dockerfile.squash-a b/test/e2e/build/squash/Dockerfile.squash-a
new file mode 100644
index 000000000..f084e093d
--- /dev/null
+++ b/test/e2e/build/squash/Dockerfile.squash-a
@@ -0,0 +1,2 @@
+FROM busybox:latest
+ADD alpinetest.tgz /data
diff --git a/test/e2e/build/squash/Dockerfile.squash-b b/test/e2e/build/squash/Dockerfile.squash-b
new file mode 100644
index 000000000..4c5fdb153
--- /dev/null
+++ b/test/e2e/build/squash/Dockerfile.squash-b
@@ -0,0 +1,2 @@
+FROM test-squash-a:latest
+RUN rm -rf /data
diff --git a/test/e2e/build/squash/Dockerfile.squash-c b/test/e2e/build/squash/Dockerfile.squash-c
new file mode 100644
index 000000000..df9c90388
--- /dev/null
+++ b/test/e2e/build/squash/Dockerfile.squash-c
@@ -0,0 +1,3 @@
+FROM busybox:latest
+ADD alpinetest.tgz /data
+RUN rm -rf /data
diff --git a/test/e2e/build/squash/alpinetest.tgz b/test/e2e/build/squash/alpinetest.tgz
new file mode 100644
index 000000000..9b8e7bda7
--- /dev/null
+++ b/test/e2e/build/squash/alpinetest.tgz
Binary files differ
diff --git a/test/e2e/build_test.go b/test/e2e/build_test.go
new file mode 100644
index 000000000..71f5d1b02
--- /dev/null
+++ b/test/e2e/build_test.go
@@ -0,0 +1,108 @@
+// +build !remoteclient
+
+package integration
+
+import (
+ "os"
+ "strings"
+
+ . "github.com/containers/libpod/test/utils"
+ . "github.com/onsi/ginkgo"
+ . "github.com/onsi/gomega"
+)
+
+var _ = Describe("Podman build", func() {
+ var (
+ tempdir string
+ err error
+ podmanTest *PodmanTestIntegration
+ )
+
+ BeforeEach(func() {
+ tempdir, err = CreateTempDirInTempDir()
+ if err != nil {
+ os.Exit(1)
+ }
+ podmanTest = PodmanTestCreate(tempdir)
+ podmanTest.Setup()
+ podmanTest.RestoreAllArtifacts()
+ })
+
+ AfterEach(func() {
+ podmanTest.Cleanup()
+ f := CurrentGinkgoTestDescription()
+ processTestResult(f)
+ })
+
+ // Start with the simplest possible build to make sure the basics work,
+ // then clean up after ourselves to verify removal works too.
+ It("podman build and remove basic alpine", func() {
+ session := podmanTest.PodmanNoCache([]string{"build", "build/basicalpine"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+
+ session = podmanTest.PodmanNoCache([]string{"rmi", "alpine"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+ })
+
+ // If the context directory points at a file rather than a directory,
+ // the build must fail.
+ It("podman build context directory a file", func() {
+ session := podmanTest.PodmanNoCache([]string{"build", "build/context_dir_a_file"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(125))
+ })
+
+ // Check that builds with different values for the squash options
+ // create the appropriate number of layers, then clean up after.
+ It("podman build basic alpine with squash", func() {
+ session := podmanTest.PodmanNoCache([]string{"build", "-f", "build/squash/Dockerfile.squash-a", "-t", "test-squash-a:latest", "build/squash"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+
+ session = podmanTest.PodmanNoCache([]string{"inspect", "--format", "{{.RootFS.Layers}}", "test-squash-a"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+ // Check for two layers
+ Expect(len(strings.Fields(session.OutputToString()))).To(Equal(2))
+
+ session = podmanTest.PodmanNoCache([]string{"build", "-f", "build/squash/Dockerfile.squash-b", "--squash", "-t", "test-squash-b:latest", "build/squash"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+
+ session = podmanTest.PodmanNoCache([]string{"inspect", "--format", "{{.RootFS.Layers}}", "test-squash-b"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+ // Check for three layers
+ Expect(len(strings.Fields(session.OutputToString()))).To(Equal(3))
+
+ session = podmanTest.PodmanNoCache([]string{"build", "-f", "build/squash/Dockerfile.squash-c", "--squash", "-t", "test-squash-c:latest", "build/squash"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+
+ session = podmanTest.PodmanNoCache([]string{"inspect", "--format", "{{.RootFS.Layers}}", "test-squash-c"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+ // Check for two layers
+ Expect(len(strings.Fields(session.OutputToString()))).To(Equal(2))
+
+ session = podmanTest.PodmanNoCache([]string{"build", "-f", "build/squash/Dockerfile.squash-c", "--squash-all", "-t", "test-squash-d:latest", "build/squash"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+
+ session = podmanTest.PodmanNoCache([]string{"inspect", "--format", "{{.RootFS.Layers}}", "test-squash-d"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+ // Check for one layer
+ Expect(len(strings.Fields(session.OutputToString()))).To(Equal(1))
+
+ session = podmanTest.PodmanNoCache([]string{"rm", "-a"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+
+ session = podmanTest.PodmanNoCache([]string{"rmi", "-a", "-f"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+ })
+})
diff --git a/test/e2e/checkpoint_test.go b/test/e2e/checkpoint_test.go
index 2d3efcbef..f208a4cf0 100644
--- a/test/e2e/checkpoint_test.go
+++ b/test/e2e/checkpoint_test.go
@@ -588,4 +588,47 @@ var _ = Describe("Podman checkpoint", func() {
// Remove exported checkpoint
os.Remove(fileName)
})
+
+ It("podman checkpoint a container started with --rm", func() {
+ // Start the container
+ localRunString := getRunString([]string{"--rm", ALPINE, "top"})
+ session := podmanTest.Podman(localRunString)
+ session.WaitWithDefaultTimeout()
+ cid := session.OutputToString()
+ Expect(podmanTest.NumberOfContainersRunning()).To(Equal(1))
+
+ // Checkpoint the container - this should fail as it was started with --rm
+ result := podmanTest.Podman([]string{"container", "checkpoint", "-l"})
+ result.WaitWithDefaultTimeout()
+ Expect(result).To(ExitWithError())
+ Expect(result.ErrorToString()).To(ContainSubstring("Cannot checkpoint containers that have been started with '--rm'"))
+
+ // Checkpointing with --export should still work
+ fileName := "/tmp/checkpoint-" + cid + ".tar.gz"
+
+ result = podmanTest.Podman([]string{"container", "checkpoint", "-l", "-e", fileName})
+ result.WaitWithDefaultTimeout()
+
+ // As the container has been started with '--rm' it will be completely
+ // cleaned up after checkpointing.
+ Expect(result.ExitCode()).To(Equal(0))
+ Expect(podmanTest.NumberOfContainersRunning()).To(Equal(0))
+ Expect(podmanTest.NumberOfContainers()).To(Equal(0))
+
+ result = podmanTest.Podman([]string{"container", "restore", "-i", fileName})
+ result.WaitWithDefaultTimeout()
+
+ Expect(result.ExitCode()).To(Equal(0))
+ Expect(podmanTest.NumberOfContainersRunning()).To(Equal(1))
+ Expect(podmanTest.GetContainerStatus()).To(ContainSubstring("Up"))
+
+ result = podmanTest.Podman([]string{"rm", "-fa"})
+ result.WaitWithDefaultTimeout()
+ Expect(result.ExitCode()).To(Equal(0))
+ Expect(podmanTest.NumberOfContainersRunning()).To(Equal(0))
+ Expect(podmanTest.NumberOfContainers()).To(Equal(0))
+
+ // Remove exported checkpoint
+ os.Remove(fileName)
+ })
})
diff --git a/test/e2e/common_test.go b/test/e2e/common_test.go
index b390df8b2..16b971e65 100644
--- a/test/e2e/common_test.go
+++ b/test/e2e/common_test.go
@@ -559,3 +559,12 @@ func (p *PodmanTestIntegration) RunHealthCheck(cid string) error {
}
return errors.Errorf("unable to detect %s as running", cid)
}
+
+func (p *PodmanTestIntegration) CreateSeccompJson(in []byte) (string, error) {
+ jsonFile := filepath.Join(p.TempDir, "seccomp.json")
+ err := WriteJsonFile(in, jsonFile)
+ if err != nil {
+ return "", err
+ }
+ return jsonFile, nil
+}
diff --git a/test/e2e/images_test.go b/test/e2e/images_test.go
index e125c62b4..80e6d4444 100644
--- a/test/e2e/images_test.go
+++ b/test/e2e/images_test.go
@@ -360,7 +360,6 @@ LABEL "com.example.vendor"="Example Vendor"
session.WaitWithDefaultTimeout()
Expect(session.ExitCode()).To(Equal(0))
output = session.OutputToString()
- Expect(output).To(Not(MatchRegexp("<missing>")))
Expect(output).To(Not(MatchRegexp("error")))
session = podmanTest.Podman([]string{"history", "--quiet", "foo"})
diff --git a/test/e2e/import_test.go b/test/e2e/import_test.go
index 979440a50..bceb30f7c 100644
--- a/test/e2e/import_test.go
+++ b/test/e2e/import_test.go
@@ -105,7 +105,9 @@ var _ = Describe("Podman import", func() {
results.WaitWithDefaultTimeout()
Expect(results.ExitCode()).To(Equal(0))
imageData := results.InspectImageJSON()
- Expect(imageData[0].Config.Cmd[0]).To(Equal("/bin/bash"))
+ Expect(imageData[0].Config.Cmd[0]).To(Equal("/bin/sh"))
+ Expect(imageData[0].Config.Cmd[1]).To(Equal("-c"))
+ Expect(imageData[0].Config.Cmd[2]).To(Equal("/bin/bash"))
})
It("podman import with change flag CMD <path>", func() {
@@ -126,6 +128,8 @@ var _ = Describe("Podman import", func() {
Expect(results.ExitCode()).To(Equal(0))
imageData := results.InspectImageJSON()
Expect(imageData[0].Config.Cmd[0]).To(Equal("/bin/sh"))
+ Expect(imageData[0].Config.Cmd[1]).To(Equal("-c"))
+ Expect(imageData[0].Config.Cmd[2]).To(Equal("/bin/sh"))
})
It("podman import with change flag CMD [\"path\",\"path'\"", func() {
@@ -137,7 +141,7 @@ var _ = Describe("Podman import", func() {
export.WaitWithDefaultTimeout()
Expect(export.ExitCode()).To(Equal(0))
- importImage := podmanTest.Podman([]string{"import", "--change", "CMD [/bin/bash]", outfile, "imported-image"})
+ importImage := podmanTest.Podman([]string{"import", "--change", "CMD [\"/bin/bash\"]", outfile, "imported-image"})
importImage.WaitWithDefaultTimeout()
Expect(importImage.ExitCode()).To(Equal(0))
diff --git a/test/e2e/libpod_suite_test.go b/test/e2e/libpod_suite_test.go
index 5239f4d8e..43f08bf03 100644
--- a/test/e2e/libpod_suite_test.go
+++ b/test/e2e/libpod_suite_test.go
@@ -135,3 +135,7 @@ func removeCache() {
func (p *PodmanTestIntegration) SeedImages() error {
return nil
}
+
+// We don't support running Varlink for local tests, so StartVarlink is a no-op
+func (p *PodmanTestIntegration) StartVarlink() {
+}
diff --git a/test/e2e/play_kube_test.go b/test/e2e/play_kube_test.go
index 416c64b5a..29c60d7ac 100644
--- a/test/e2e/play_kube_test.go
+++ b/test/e2e/play_kube_test.go
@@ -3,6 +3,7 @@
package integration
import (
+ "fmt"
"os"
"path/filepath"
"text/template"
@@ -20,6 +21,13 @@ metadata:
labels:
app: {{ .Name }}
name: {{ .Name }}
+{{ with .Annotations }}
+ annotations:
+ {{ range $key, $value := . }}
+ {{ $key }}: {{ $value }}
+ {{ end }}
+{{ end }}
+
spec:
hostname: {{ .Hostname }}
containers:
@@ -72,6 +80,7 @@ var (
defaultCtrCmd = []string{"top"}
defaultCtrImage = ALPINE
defaultPodName = "testPod"
+ seccompPwdEPERM = []byte(`{"defaultAction":"SCMP_ACT_ALLOW","syscalls":[{"name":"getcwd","action":"SCMP_ACT_ERRNO"}]}`)
)
func generateKubeYaml(pod *Pod, fileName string) error {
@@ -95,16 +104,17 @@ func generateKubeYaml(pod *Pod, fileName string) error {
// Pod describes the options a kube yaml can be configured at pod level
type Pod struct {
- Name string
- Hostname string
- Ctrs []*Ctr
+ Name string
+ Hostname string
+ Ctrs []*Ctr
+ Annotations map[string]string
}
// getPod takes a list of podOptions and returns a pod with sane defaults
// and the configured options
// if no containers are added, it will add the default container
func getPod(options ...podOption) *Pod {
- p := Pod{defaultPodName, "", make([]*Ctr, 0)}
+ p := Pod{defaultPodName, "", make([]*Ctr, 0), make(map[string]string)}
for _, option := range options {
option(&p)
}
@@ -128,6 +138,12 @@ func withCtr(c *Ctr) podOption {
}
}
+func withAnnotation(k, v string) podOption {
+ return func(pod *Pod) {
+ pod.Annotations[k] = v
+ }
+}
+
// Ctr describes the options a kube yaml can be configured at container level
type Ctr struct {
Name string
@@ -330,4 +346,51 @@ var _ = Describe("Podman generate kube", func() {
inspect.WaitWithDefaultTimeout()
Expect(inspect.ExitCode()).To(Equal(0))
})
+
+ It("podman play kube seccomp container level", func() {
+ // play kube is expected to set a seccomp label if it's applied as an annotation
+ jsonFile, err := podmanTest.CreateSeccompJson(seccompPwdEPERM)
+ if err != nil {
+ fmt.Println(err)
+ Skip("Failed to prepare seccomp.json for test.")
+ }
+
+ ctrAnnotation := "container.seccomp.security.alpha.kubernetes.io/" + defaultCtrName
+ ctr := getCtr(withCmd([]string{"pwd"}))
+
+ err = generateKubeYaml(getPod(withCtr(ctr), withAnnotation(ctrAnnotation, "localhost:"+jsonFile)), kubeYaml)
+ Expect(err).To(BeNil())
+
+ kube := podmanTest.Podman([]string{"play", "kube", kubeYaml})
+ kube.WaitWithDefaultTimeout()
+ Expect(kube.ExitCode()).To(Equal(0))
+
+ logs := podmanTest.Podman([]string{"logs", defaultCtrName})
+ logs.WaitWithDefaultTimeout()
+ Expect(logs.ExitCode()).To(Equal(0))
+ Expect(logs.OutputToString()).To(ContainSubstring("Operation not permitted"))
+ })
+
+ It("podman play kube seccomp pod level", func() {
+ // play kube is expected to set a seccomp label if it's applied as an annotation
+ jsonFile, err := podmanTest.CreateSeccompJson(seccompPwdEPERM)
+ if err != nil {
+ fmt.Println(err)
+ Skip("Failed to prepare seccomp.json for test.")
+ }
+
+ ctr := getCtr(withCmd([]string{"pwd"}))
+
+ err = generateKubeYaml(getPod(withCtr(ctr), withAnnotation("seccomp.security.alpha.kubernetes.io/pod", "localhost:"+jsonFile)), kubeYaml)
+ Expect(err).To(BeNil())
+
+ kube := podmanTest.Podman([]string{"play", "kube", kubeYaml})
+ kube.WaitWithDefaultTimeout()
+ Expect(kube.ExitCode()).To(Equal(0))
+
+ logs := podmanTest.Podman([]string{"logs", defaultCtrName})
+ logs.WaitWithDefaultTimeout()
+ Expect(logs.ExitCode()).To(Equal(0))
+ Expect(logs.OutputToString()).To(ContainSubstring("Operation not permitted"))
+ })
})
diff --git a/test/e2e/pod_prune_test.go b/test/e2e/pod_prune_test.go
index da0d425cb..389d3cb27 100644
--- a/test/e2e/pod_prune_test.go
+++ b/test/e2e/pod_prune_test.go
@@ -41,7 +41,24 @@ var _ = Describe("Podman pod prune", func() {
Expect(result.ExitCode()).To(Equal(0))
})
- It("podman pod prune doesn't remove a pod with a container", func() {
+ It("podman pod prune doesn't remove a pod with a running container", func() {
+ _, ec, podid := podmanTest.CreatePod("")
+ Expect(ec).To(Equal(0))
+
+ ec2 := podmanTest.RunTopContainerInPod("", podid)
+ ec2.WaitWithDefaultTimeout()
+ Expect(ec2.ExitCode()).To(Equal(0))
+
+ result := podmanTest.Podman([]string{"pod", "prune"})
+ result.WaitWithDefaultTimeout()
+ Expect(result.ExitCode()).To((Equal(0)))
+
+ result = podmanTest.Podman([]string{"ps", "-qa"})
+ result.WaitWithDefaultTimeout()
+ Expect(len(result.OutputToStringArray())).To(Equal(1))
+ })
+
+ It("podman pod prune removes a pod with a stopped container", func() {
_, ec, podid := podmanTest.CreatePod("")
Expect(ec).To(Equal(0))
@@ -50,11 +67,11 @@ var _ = Describe("Podman pod prune", func() {
result := podmanTest.Podman([]string{"pod", "prune"})
result.WaitWithDefaultTimeout()
- Expect(result.ExitCode()).To(Equal(125))
+ Expect(result.ExitCode()).To(Equal(0))
result = podmanTest.Podman([]string{"ps", "-qa"})
result.WaitWithDefaultTimeout()
- Expect(len(result.OutputToStringArray())).To(Equal(1))
+ Expect(len(result.OutputToStringArray())).To(Equal(0))
})
It("podman pod prune -f does remove a running container", func() {
diff --git a/test/e2e/pod_rm_test.go b/test/e2e/pod_rm_test.go
index de68e885a..90f178be6 100644
--- a/test/e2e/pod_rm_test.go
+++ b/test/e2e/pod_rm_test.go
@@ -77,7 +77,7 @@ var _ = Describe("Podman pod rm", func() {
Expect(result.OutputToString()).To(Not(ContainSubstring(podid2)))
})
- It("podman pod rm doesn't remove a pod with a container", func() {
+ It("podman pod rm removes a pod with a container", func() {
_, ec, podid := podmanTest.CreatePod("")
Expect(ec).To(Equal(0))
@@ -86,11 +86,11 @@ var _ = Describe("Podman pod rm", func() {
result := podmanTest.Podman([]string{"pod", "rm", podid})
result.WaitWithDefaultTimeout()
- Expect(result.ExitCode()).To(Equal(125))
+ Expect(result.ExitCode()).To(Equal(0))
result = podmanTest.Podman([]string{"ps", "-qa"})
result.WaitWithDefaultTimeout()
- Expect(len(result.OutputToStringArray())).To(Equal(1))
+ Expect(len(result.OutputToStringArray())).To(Equal(0))
})
It("podman pod rm -f does remove a running container", func() {
@@ -136,7 +136,7 @@ var _ = Describe("Podman pod rm", func() {
result := podmanTest.Podman([]string{"pod", "rm", "-a"})
result.WaitWithDefaultTimeout()
Expect(result).To(ExitWithError())
- foundExpectedError, _ := result.ErrorGrepString("contains containers and cannot be removed")
+ foundExpectedError, _ := result.ErrorGrepString("cannot be removed")
Expect(foundExpectedError).To(Equal(true))
num_pods = podmanTest.NumberOfPods()
@@ -186,4 +186,47 @@ var _ = Describe("Podman pod rm", func() {
result.WaitWithDefaultTimeout()
Expect(result.OutputToString()).To(BeEmpty())
})
+
+ It("podman rm bogus pod", func() {
+ session := podmanTest.Podman([]string{"pod", "rm", "bogus"})
+ session.WaitWithDefaultTimeout()
+ // TODO: `podman rm` returns 1 for a bogus container. Should the RC be consistent?
+ Expect(session.ExitCode()).To(Equal(125))
+ })
+
+ It("podman rm bogus pod and a running pod", func() {
+ _, ec, podid1 := podmanTest.CreatePod("")
+ Expect(ec).To(Equal(0))
+
+ session := podmanTest.RunTopContainerInPod("test1", podid1)
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+
+ session = podmanTest.Podman([]string{"pod", "rm", "bogus", "test1"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(125))
+
+ session = podmanTest.Podman([]string{"pod", "rm", "test1", "bogus"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(125))
+ })
+
+ It("podman rm --ignore bogus pod and a running pod", func() {
+ SkipIfRemote()
+
+ _, ec, podid1 := podmanTest.CreatePod("")
+ Expect(ec).To(Equal(0))
+
+ session := podmanTest.RunTopContainerInPod("test1", podid1)
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+
+ session = podmanTest.Podman([]string{"pod", "rm", "--force", "--ignore", "bogus", "test1"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+
+ session = podmanTest.Podman([]string{"pod", "rm", "--ignore", "test1", "bogus"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+ })
})
diff --git a/test/e2e/pod_stats_test.go b/test/e2e/pod_stats_test.go
index 4d573a2c7..347f33e62 100644
--- a/test/e2e/pod_stats_test.go
+++ b/test/e2e/pod_stats_test.go
@@ -5,6 +5,7 @@ package integration
import (
"os"
+ "github.com/containers/libpod/pkg/cgroups"
. "github.com/containers/libpod/test/utils"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
@@ -13,12 +14,17 @@ import (
var _ = Describe("Podman pod stats", func() {
var (
tempdir string
- err error
podmanTest *PodmanTestIntegration
)
BeforeEach(func() {
- SkipIfRootless()
+ cgroupsv2, err := cgroups.IsCgroup2UnifiedMode()
+ Expect(err).To(BeNil())
+
+ if os.Geteuid() != 0 && !cgroupsv2 {
+ Skip("This function is not enabled for rootless podman not running on cgroups v2")
+ }
+
tempdir, err = CreateTempDirInTempDir()
if err != nil {
os.Exit(1)
diff --git a/test/e2e/pod_stop_test.go b/test/e2e/pod_stop_test.go
index 361a63a7f..a61917adb 100644
--- a/test/e2e/pod_stop_test.go
+++ b/test/e2e/pod_stop_test.go
@@ -38,6 +38,46 @@ var _ = Describe("Podman pod stop", func() {
Expect(session.ExitCode()).To(Equal(125))
})
+ It("podman pod stop --ignore bogus pod", func() {
+ SkipIfRemote()
+
+ session := podmanTest.Podman([]string{"pod", "stop", "--ignore", "123"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+ })
+
+ It("podman stop bogus pod and a running pod", func() {
+ _, ec, podid1 := podmanTest.CreatePod("")
+ Expect(ec).To(Equal(0))
+
+ session := podmanTest.RunTopContainerInPod("test1", podid1)
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+
+ session = podmanTest.Podman([]string{"pod", "stop", "bogus", "test1"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(125))
+ })
+
+ It("podman stop --ignore bogus pod and a running pod", func() {
+ SkipIfRemote()
+
+ _, ec, podid1 := podmanTest.CreatePod("")
+ Expect(ec).To(Equal(0))
+
+ session := podmanTest.RunTopContainerInPod("test1", podid1)
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+
+ session = podmanTest.Podman([]string{"pod", "stop", "--ignore", "bogus", "test1"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+
+ session = podmanTest.Podman([]string{"pod", "stop", "--ignore", "test1", "bogus"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+ })
+
It("podman pod stop single empty pod", func() {
_, ec, podid := podmanTest.CreatePod("")
Expect(ec).To(Equal(0))
diff --git a/test/e2e/prune_test.go b/test/e2e/prune_test.go
index df0525a79..c9b01ad4a 100644
--- a/test/e2e/prune_test.go
+++ b/test/e2e/prune_test.go
@@ -43,11 +43,16 @@ var _ = Describe("Podman prune", func() {
top.WaitWithDefaultTimeout()
Expect(top.ExitCode()).To(Equal(0))
- session := podmanTest.Podman([]string{"run", ALPINE, "ls"})
- session.WaitWithDefaultTimeout()
- Expect(session.ExitCode()).To(Equal(0))
+ top = podmanTest.RunTopContainer("")
+ top.WaitWithDefaultTimeout()
+ Expect(top.ExitCode()).To(Equal(0))
+ cid := top.OutputToString()
+
+ stop := podmanTest.Podman([]string{"stop", cid})
+ stop.WaitWithDefaultTimeout()
+ Expect(stop.ExitCode()).To(Equal(0))
- prune := podmanTest.Podman([]string{"container", "prune"})
+ prune := podmanTest.Podman([]string{"container", "prune", "-f"})
prune.WaitWithDefaultTimeout()
Expect(prune.ExitCode()).To(Equal(0))
@@ -64,7 +69,7 @@ var _ = Describe("Podman prune", func() {
hasNone, _ := none.GrepString("<none>")
Expect(hasNone).To(BeTrue())
- prune := podmanTest.Podman([]string{"image", "prune"})
+ prune := podmanTest.Podman([]string{"image", "prune", "-f"})
prune.WaitWithDefaultTimeout()
Expect(prune.ExitCode()).To(Equal(0))
@@ -78,7 +83,7 @@ var _ = Describe("Podman prune", func() {
It("podman image prune unused images", func() {
podmanTest.RestoreAllArtifacts()
- prune := podmanTest.PodmanNoCache([]string{"image", "prune", "-a"})
+ prune := podmanTest.PodmanNoCache([]string{"image", "prune", "-af"})
prune.WaitWithDefaultTimeout()
Expect(prune.ExitCode()).To(Equal(0))
diff --git a/test/e2e/pull_test.go b/test/e2e/pull_test.go
index eee0b131b..96340ef30 100644
--- a/test/e2e/pull_test.go
+++ b/test/e2e/pull_test.go
@@ -339,7 +339,7 @@ var _ = Describe("Podman pull", func() {
})
It("podman pull check all tags", func() {
- session := podmanTest.PodmanNoCache([]string{"pull", "--all-tags", "alpine"})
+ session := podmanTest.PodmanNoCache([]string{"pull", "--all-tags", "k8s.gcr.io/pause"})
session.WaitWithDefaultTimeout()
Expect(session.ExitCode()).To(Equal(0))
Expect(session.LineInOuputStartsWith("Pulled Images:")).To(BeTrue())
@@ -348,10 +348,6 @@ var _ = Describe("Podman pull", func() {
session.WaitWithDefaultTimeout()
Expect(session.ExitCode()).To(Equal(0))
Expect(len(session.OutputToStringArray())).To(BeNumerically(">", 4))
-
- rmi := podmanTest.PodmanNoCache([]string{"rmi", "-a", "-f"})
- rmi.WaitWithDefaultTimeout()
- Expect(rmi.ExitCode()).To(Equal(0))
})
It("podman pull from docker with nonexist --authfile", func() {
diff --git a/test/e2e/rm_test.go b/test/e2e/rm_test.go
index 8ee7dc750..4eb568879 100644
--- a/test/e2e/rm_test.go
+++ b/test/e2e/rm_test.go
@@ -1,6 +1,7 @@
package integration
import (
+ "io/ioutil"
"os"
. "github.com/containers/libpod/test/utils"
@@ -138,11 +139,86 @@ var _ = Describe("Podman rm", func() {
})
+ It("podman rm --cidfile", func() {
+ SkipIfRemote()
+
+ tmpDir, err := ioutil.TempDir("", "")
+ Expect(err).To(BeNil())
+ tmpFile := tmpDir + "/cid"
+
+ defer os.RemoveAll(tmpDir)
+
+ session := podmanTest.Podman([]string{"create", "--cidfile", tmpFile, ALPINE, "ls"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+ cid := session.OutputToStringArray()[0]
+ Expect(podmanTest.NumberOfContainers()).To(Equal(1))
+
+ result := podmanTest.Podman([]string{"rm", "--cidfile", tmpFile})
+ result.WaitWithDefaultTimeout()
+ Expect(result.ExitCode()).To(Equal(0))
+ output := result.OutputToString()
+ Expect(output).To(ContainSubstring(cid))
+ Expect(podmanTest.NumberOfContainers()).To(Equal(0))
+ })
+
+ It("podman rm multiple --cidfile", func() {
+ SkipIfRemote()
+
+ tmpDir, err := ioutil.TempDir("", "")
+ Expect(err).To(BeNil())
+ tmpFile1 := tmpDir + "/cid-1"
+ tmpFile2 := tmpDir + "/cid-2"
+
+ defer os.RemoveAll(tmpDir)
+
+ session := podmanTest.Podman([]string{"create", "--cidfile", tmpFile1, ALPINE, "ls"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+ cid1 := session.OutputToStringArray()[0]
+ Expect(podmanTest.NumberOfContainers()).To(Equal(1))
+
+ session = podmanTest.Podman([]string{"create", "--cidfile", tmpFile2, ALPINE, "ls"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+ cid2 := session.OutputToStringArray()[0]
+ Expect(podmanTest.NumberOfContainers()).To(Equal(2))
+
+ result := podmanTest.Podman([]string{"rm", "--cidfile", tmpFile1, "--cidfile", tmpFile2})
+ result.WaitWithDefaultTimeout()
+ Expect(result.ExitCode()).To(Equal(0))
+ output := result.OutputToString()
+ Expect(output).To(ContainSubstring(cid1))
+ Expect(output).To(ContainSubstring(cid2))
+ Expect(podmanTest.NumberOfContainers()).To(Equal(0))
+ })
+
+ It("podman rm invalid --latest and --cidfile and --all", func() {
+ SkipIfRemote()
+
+ result := podmanTest.Podman([]string{"rm", "--cidfile", "foobar", "--latest"})
+ result.WaitWithDefaultTimeout()
+ Expect(result.ExitCode()).To(Equal(125))
+
+ result = podmanTest.Podman([]string{"rm", "--cidfile", "foobar", "--all"})
+ result.WaitWithDefaultTimeout()
+ Expect(result.ExitCode()).To(Equal(125))
+
+ result = podmanTest.Podman([]string{"rm", "--cidfile", "foobar", "--all", "--latest"})
+ result.WaitWithDefaultTimeout()
+ Expect(result.ExitCode()).To(Equal(125))
+
+ result = podmanTest.Podman([]string{"rm", "--latest", "--all"})
+ result.WaitWithDefaultTimeout()
+ Expect(result.ExitCode()).To(Equal(125))
+ })
+
It("podman rm bogus container", func() {
session := podmanTest.Podman([]string{"rm", "bogus"})
session.WaitWithDefaultTimeout()
Expect(session.ExitCode()).To(Equal(1))
})
+
It("podman rm bogus container and a running container", func() {
session := podmanTest.RunTopContainer("test1")
session.WaitWithDefaultTimeout()
@@ -156,4 +232,20 @@ var _ = Describe("Podman rm", func() {
session.WaitWithDefaultTimeout()
Expect(session.ExitCode()).To(Equal(125))
})
+
+ It("podman rm --ignore bogus container and a running container", func() {
+ SkipIfRemote()
+
+ session := podmanTest.RunTopContainer("test1")
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+
+ session = podmanTest.Podman([]string{"rm", "--force", "--ignore", "bogus", "test1"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+
+ session = podmanTest.Podman([]string{"rm", "--ignore", "test1", "bogus"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+ })
})
diff --git a/test/e2e/run_networking_test.go b/test/e2e/run_networking_test.go
index ec12f709a..5e587b198 100644
--- a/test/e2e/run_networking_test.go
+++ b/test/e2e/run_networking_test.go
@@ -232,4 +232,18 @@ var _ = Describe("Podman run networking", func() {
Expect(session).To(ExitWithError())
Expect(session.ErrorToString()).To(ContainSubstring("stat /run/netns/xxy: no such file or directory"))
})
+
+ It("podman run in custom CNI network with --static-ip", func() {
+ SkipIfRootless()
+ netName := "podmantestnetwork"
+ ipAddr := "10.20.30.128"
+ create := podmanTest.Podman([]string{"network", "create", "--subnet", "10.20.30.0/24", netName})
+ create.WaitWithDefaultTimeout()
+ Expect(create.ExitCode()).To(BeZero())
+
+ run := podmanTest.Podman([]string{"run", "-t", "-i", "--rm", "--net", netName, "--ip", ipAddr, ALPINE, "ip", "addr"})
+ run.WaitWithDefaultTimeout()
+ Expect(run.ExitCode()).To(BeZero())
+ Expect(run.OutputToString()).To(ContainSubstring(ipAddr))
+ })
})
diff --git a/test/e2e/run_test.go b/test/e2e/run_test.go
index 7fc85c9ce..72547ea00 100644
--- a/test/e2e/run_test.go
+++ b/test/e2e/run_test.go
@@ -160,9 +160,9 @@ var _ = Describe("Podman run", func() {
})
It("podman run seccomp test", func() {
- jsonFile := filepath.Join(podmanTest.TempDir, "seccomp.json")
+
in := []byte(`{"defaultAction":"SCMP_ACT_ALLOW","syscalls":[{"name":"getcwd","action":"SCMP_ACT_ERRNO"}]}`)
- err := WriteJsonFile(in, jsonFile)
+ jsonFile, err := podmanTest.CreateSeccompJson(in)
if err != nil {
fmt.Println(err)
Skip("Failed to prepare seccomp.json for test.")
diff --git a/test/e2e/stats_test.go b/test/e2e/stats_test.go
index fbf7c9920..762417a17 100644
--- a/test/e2e/stats_test.go
+++ b/test/e2e/stats_test.go
@@ -6,6 +6,7 @@ import (
"fmt"
"os"
+ "github.com/containers/libpod/pkg/cgroups"
. "github.com/containers/libpod/test/utils"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
@@ -16,12 +17,16 @@ import (
var _ = Describe("Podman stats", func() {
var (
tempdir string
- err error
podmanTest *PodmanTestIntegration
)
BeforeEach(func() {
- SkipIfRootless()
+ cgroupsv2, err := cgroups.IsCgroup2UnifiedMode()
+ Expect(err).To(BeNil())
+
+ if os.Geteuid() != 0 && !cgroupsv2 {
+ Skip("This function is not enabled for rootless podman not running on cgroups v2")
+ }
tempdir, err = CreateTempDirInTempDir()
if err != nil {
os.Exit(1)
diff --git a/test/e2e/stop_test.go b/test/e2e/stop_test.go
index 77ab6dbb0..54c64d66b 100644
--- a/test/e2e/stop_test.go
+++ b/test/e2e/stop_test.go
@@ -1,6 +1,7 @@
package integration
import (
+ "io/ioutil"
"os"
"strings"
@@ -39,6 +40,21 @@ var _ = Describe("Podman stop", func() {
Expect(session.ExitCode()).To(Equal(125))
})
+ It("podman stop --ignore bogus container", func() {
+ SkipIfRemote()
+
+ session := podmanTest.RunTopContainer("")
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+ cid := session.OutputToString()
+
+ session = podmanTest.Podman([]string{"stop", "--ignore", "foobar", cid})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+ output := session.OutputToString()
+ Expect(output).To(ContainSubstring(cid))
+ })
+
It("podman stop container by id", func() {
session := podmanTest.RunTopContainer("")
session.WaitWithDefaultTimeout()
@@ -215,4 +231,79 @@ var _ = Describe("Podman stop", func() {
Expect(strings.TrimSpace(finalCtrs.OutputToString())).To(Equal(""))
})
+ It("podman stop --cidfile", func() {
+ SkipIfRemote()
+
+ tmpDir, err := ioutil.TempDir("", "")
+ Expect(err).To(BeNil())
+ tmpFile := tmpDir + "/cid"
+
+ defer os.RemoveAll(tmpDir)
+
+ session := podmanTest.Podman([]string{"create", "--cidfile", tmpFile, "-d", ALPINE, "top"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+ cid := session.OutputToStringArray()[0]
+
+ session = podmanTest.Podman([]string{"start", cid})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+
+ result := podmanTest.Podman([]string{"stop", "--cidfile", tmpFile})
+ result.WaitWithDefaultTimeout()
+ Expect(result.ExitCode()).To(Equal(0))
+ output := result.OutputToString()
+ Expect(output).To(ContainSubstring(cid))
+ })
+
+ It("podman stop multiple --cidfile", func() {
+ SkipIfRemote()
+
+ tmpDir, err := ioutil.TempDir("", "")
+ Expect(err).To(BeNil())
+ tmpFile1 := tmpDir + "/cid-1"
+ tmpFile2 := tmpDir + "/cid-2"
+
+ defer os.RemoveAll(tmpDir)
+
+ session := podmanTest.Podman([]string{"run", "--cidfile", tmpFile1, "-d", ALPINE, "top"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+ cid1 := session.OutputToStringArray()[0]
+ Expect(podmanTest.NumberOfContainers()).To(Equal(1))
+
+ session = podmanTest.Podman([]string{"run", "--cidfile", tmpFile2, "-d", ALPINE, "top"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+ cid2 := session.OutputToStringArray()[0]
+ Expect(podmanTest.NumberOfContainers()).To(Equal(2))
+
+ result := podmanTest.Podman([]string{"stop", "--cidfile", tmpFile1, "--cidfile", tmpFile2})
+ result.WaitWithDefaultTimeout()
+ Expect(result.ExitCode()).To(Equal(0))
+ output := result.OutputToString()
+ Expect(output).To(ContainSubstring(cid1))
+ Expect(output).To(ContainSubstring(cid2))
+ Expect(podmanTest.NumberOfContainers()).To(Equal(2))
+ })
+
+ It("podman stop invalid --latest and --cidfile and --all", func() {
+ SkipIfRemote()
+
+ result := podmanTest.Podman([]string{"stop", "--cidfile", "foobar", "--latest"})
+ result.WaitWithDefaultTimeout()
+ Expect(result.ExitCode()).To(Equal(125))
+
+ result = podmanTest.Podman([]string{"stop", "--cidfile", "foobar", "--all"})
+ result.WaitWithDefaultTimeout()
+ Expect(result.ExitCode()).To(Equal(125))
+
+ result = podmanTest.Podman([]string{"stop", "--cidfile", "foobar", "--all", "--latest"})
+ result.WaitWithDefaultTimeout()
+ Expect(result.ExitCode()).To(Equal(125))
+
+ result = podmanTest.Podman([]string{"stop", "--latest", "--all"})
+ result.WaitWithDefaultTimeout()
+ Expect(result.ExitCode()).To(Equal(125))
+ })
})
diff --git a/test/e2e/system_reset_test.go b/test/e2e/system_reset_test.go
new file mode 100644
index 000000000..e5ce69739
--- /dev/null
+++ b/test/e2e/system_reset_test.go
@@ -0,0 +1,83 @@
+package integration
+
+import (
+ "fmt"
+ "os"
+
+ . "github.com/containers/libpod/test/utils"
+ . "github.com/onsi/ginkgo"
+ . "github.com/onsi/gomega"
+)
+
+var _ = Describe("podman system reset", func() {
+ var (
+ tempdir string
+ err error
+ podmanTest *PodmanTestIntegration
+ )
+
+ BeforeEach(func() {
+ tempdir, err = CreateTempDirInTempDir()
+ if err != nil {
+ os.Exit(1)
+ }
+ podmanTest = PodmanTestCreate(tempdir)
+ podmanTest.Setup()
+ podmanTest.SeedImages()
+ })
+
+ AfterEach(func() {
+ podmanTest.Cleanup()
+ f := CurrentGinkgoTestDescription()
+ timedResult := fmt.Sprintf("Test: %s completed in %f seconds", f.TestText, f.Duration.Seconds())
+ GinkgoWriter.Write([]byte(timedResult))
+ })
+
+ It("podman system reset", func() {
+ // system reset will not remove images from additional stores, so record the image count first
+
+ session := podmanTest.Podman([]string{"rmi", "--force", "--all"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+
+ session = podmanTest.Podman([]string{"images", "-n"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+ l := len(session.OutputToStringArray())
+
+ session = podmanTest.Podman([]string{"pull", ALPINE})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+
+ session = podmanTest.Podman([]string{"volume", "create", "data"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+
+ session = podmanTest.Podman([]string{"create", "-v", "data:/data", ALPINE, "echo", "hello"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+
+ session = podmanTest.Podman([]string{"system", "reset", "-f"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+
+ // If remote then the varlink service should have exited
+ // On local tests this is a noop
+ podmanTest.StartVarlink()
+
+ session = podmanTest.Podman([]string{"images", "-n"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+ Expect(len(session.OutputToStringArray())).To(Equal(l))
+
+ session = podmanTest.Podman([]string{"volume", "ls"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+ Expect(len(session.OutputToStringArray())).To(Equal(0))
+
+ session = podmanTest.Podman([]string{"container", "ls", "-q"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+ Expect(len(session.OutputToStringArray())).To(Equal(0))
+ })
+})
diff --git a/test/e2e/version_test.go b/test/e2e/version_test.go
index 0db2e2cf2..c2af613aa 100644
--- a/test/e2e/version_test.go
+++ b/test/e2e/version_test.go
@@ -33,7 +33,6 @@ var _ = Describe("Podman version", func() {
})
It("podman version", func() {
- SkipIfRemote()
session := podmanTest.Podman([]string{"version"})
session.WaitWithDefaultTimeout()
Expect(session.ExitCode()).To(Equal(0))
@@ -43,7 +42,6 @@ var _ = Describe("Podman version", func() {
})
It("podman -v", func() {
- SkipIfRemote()
session := podmanTest.Podman([]string{"-v"})
session.WaitWithDefaultTimeout()
Expect(session.ExitCode()).To(Equal(0))
@@ -52,7 +50,6 @@ var _ = Describe("Podman version", func() {
})
It("podman --version", func() {
- SkipIfRemote()
session := podmanTest.Podman([]string{"--version"})
session.WaitWithDefaultTimeout()
Expect(session.ExitCode()).To(Equal(0))
@@ -61,7 +58,6 @@ var _ = Describe("Podman version", func() {
})
It("podman version --format json", func() {
- SkipIfRemote()
session := podmanTest.Podman([]string{"version", "--format", "json"})
session.WaitWithDefaultTimeout()
Expect(session.ExitCode()).To(Equal(0))
@@ -69,7 +65,6 @@ var _ = Describe("Podman version", func() {
})
It("podman version --format json", func() {
- SkipIfRemote()
session := podmanTest.Podman([]string{"version", "--format", "{{ json .}}"})
session.WaitWithDefaultTimeout()
Expect(session.ExitCode()).To(Equal(0))
@@ -77,8 +72,15 @@ var _ = Describe("Podman version", func() {
})
It("podman version --format GO template", func() {
- SkipIfRemote()
- session := podmanTest.Podman([]string{"version", "--format", "{{ .Version }}"})
+ session := podmanTest.Podman([]string{"version", "--format", "{{ .Client.Version }}"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+
+ session = podmanTest.Podman([]string{"version", "--format", "{{ .Server.Version }}"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+
+ session = podmanTest.Podman([]string{"version", "--format", "{{ .Version }}"})
session.WaitWithDefaultTimeout()
Expect(session.ExitCode()).To(Equal(0))
})
diff --git a/test/system/010-images.bats b/test/system/010-images.bats
index 380623078..543876509 100644
--- a/test/system/010-images.bats
+++ b/test/system/010-images.bats
@@ -44,4 +44,19 @@ size | [0-9]\\\+
}
+@test "podman images - history output" {
+ run_podman images --format json
+ actual=$(echo $output | jq -r '.[0].history | length')
+ is "$actual" "0"
+
+ run_podman tag $PODMAN_TEST_IMAGE_REGISTRY/$PODMAN_TEST_IMAGE_USER/$PODMAN_TEST_IMAGE_NAME:$PODMAN_TEST_IMAGE_TAG test-image
+ run_podman images --format json
+ actual=$(echo $output | jq -r '.[1].history | length')
+ is "$actual" "0"
+ actual=$(echo $output | jq -r '.[0].history | length')
+ is "$actual" "1"
+ actual=$(echo $output | jq -r '.[0].history[0]')
+ is "$actual" "$PODMAN_TEST_IMAGE_REGISTRY/$PODMAN_TEST_IMAGE_USER/$PODMAN_TEST_IMAGE_NAME:$PODMAN_TEST_IMAGE_TAG"
+}
+
# vim: filetype=sh
diff --git a/test/system/070-build.bats b/test/system/070-build.bats
index a9d2ed1b7..7c39da72c 100644
--- a/test/system/070-build.bats
+++ b/test/system/070-build.bats
@@ -32,6 +32,54 @@ EOF
run_podman rmi -f build_test
}
+# Regression introduced after v1.5.0: this test passes in v1.5.0 but fails in v1.6
+@test "podman build - cache (#3920)" {
+ if is_remote && is_rootless; then
+ skip "unreliable with podman-remote and rootless; #2972"
+ fi
+
+ # Make an empty test directory, with a subdirectory used for tar
+ tmpdir=$PODMAN_TMPDIR/build-test
+ run mkdir -p $tmpdir/subtest || die "Could not mkdir $tmpdir/subtest"
+
+ echo "This is the ORIGINAL file" > $tmpdir/subtest/myfile1
+ run tar -C $tmpdir -cJf $tmpdir/myfile.tar.xz subtest
+
+ cat >$tmpdir/Dockerfile <<EOF
+FROM $IMAGE
+ADD myfile.tar.xz /
+EOF
+
+ # One of: ADD myfile /myfile or COPY . .
+ run_podman build -t build_test -f $tmpdir/Dockerfile $tmpdir
+ is "$output" ".*STEP 3: COMMIT" "COMMIT seen in log"
+ if [[ "$output" =~ "Using cache" ]]; then
+ is "$output" "[no instance of 'Using cache']" "no cache used"
+ fi
+ iid=${lines[-1]}
+
+ run_podman run --rm build_test cat /subtest/myfile1
+ is "$output" "This is the ORIGINAL file" "file contents, first time"
+
+ # Step 2: Recreate the tarfile, with new content. Rerun podman build.
+ echo "This is a NEW file" >| $tmpdir/subtest/myfile2
+ run tar -C $tmpdir -cJf $tmpdir/myfile.tar.xz subtest
+
+ run_podman build -t build_test -f $tmpdir/Dockerfile $tmpdir
+ is "$output" ".*STEP 3: COMMIT" "COMMIT seen in log"
+
+ # Since the tarfile is modified, podman SHOULD NOT use a cached layer.
+ if [[ "$output" =~ "Using cache" ]]; then
+ is "$output" "[no instance of 'Using cache']" "no cache used"
+ fi
+
+ # Pre-buildah-1906, this fails with ENOENT because the tarfile was cached
+ run_podman run --rm build_test cat /subtest/myfile2
+ is "$output" "This is a NEW file" "file contents, second time"
+
+ run_podman rmi -f build_test $iid
+}
+
function teardown() {
# A timeout or other error in 'build' can leave behind stale images
# that podman can't even see and which will cascade into subsequent
diff --git a/troubleshooting.md b/troubleshooting.md
index c4e577645..9def0e08b 100644
--- a/troubleshooting.md
+++ b/troubleshooting.md
@@ -413,7 +413,6 @@ You'll need to either:
### 17) rootless containers exit once the user session exits
-
You need to set lingering mode through loginctl to prevent user processes from being killed once
the user session completes.
@@ -429,3 +428,17 @@ You'll need to either:
or as root if your user has not enough privileges.
* sudo loginctl enable-linger $UID
+
+### 18) `podman run` fails with "bpf create: permission denied error"
+
+The Kernel Lockdown patches deny eBPF programs when Secure Boot is enabled in the BIOS. [Matthew Garrett's post](https://mjg59.dreamwidth.org/50577.html) describes the relationship between Lockdown and Secure Boot, and [Jan-Philip Gehrcke's post](https://gehrcke.de/2019/09/running-an-ebpf-program-may-require-lifting-the-kernel-lockdown/) connects this with eBPF. [RH bug 1768125](https://bugzilla.redhat.com/show_bug.cgi?id=1768125) contains some additional details.
+
+#### Symptom
+
+Attempts to run podman result in
+
+```Error: bpf create : Operation not permitted: OCI runtime permission denied error```
+
+#### Solution
+
+One workaround is to disable Secure Boot in your BIOS.
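Before changing BIOS settings it can help to confirm that lockdown is actually active. A minimal Go sketch (not part of this patch; it assumes a kernel built with the lockdown LSM, which exposes its state at /sys/kernel/security/lockdown):

```go
package main

import (
	"fmt"
	"io/ioutil"
)

func main() {
	// Prints e.g. "none [integrity] confidentiality"; the bracketed
	// entry is the active mode. Anything other than [none] can deny
	// eBPF programs and trigger the error above.
	data, err := ioutil.ReadFile("/sys/kernel/security/lockdown")
	if err != nil {
		// Older kernels without the lockdown LSM do not expose this file.
		fmt.Println("lockdown state unavailable:", err)
		return
	}
	fmt.Printf("kernel lockdown state: %s", data)
}
```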
diff --git a/vendor/github.com/Microsoft/hcsshim/appveyor.yml b/vendor/github.com/Microsoft/hcsshim/appveyor.yml
index 7f0f81632..661bc406f 100644
--- a/vendor/github.com/Microsoft/hcsshim/appveyor.yml
+++ b/vendor/github.com/Microsoft/hcsshim/appveyor.yml
@@ -8,7 +8,7 @@ environment:
GOPATH: c:\gopath
PATH: C:\mingw-w64\x86_64-7.2.0-posix-seh-rt_v5-rev1\mingw64\bin;%GOPATH%\bin;C:\gometalinter-2.0.12-windows-amd64;%PATH%
-stack: go 1.12.9
+stack: go 1.13.4
build_script:
- appveyor DownloadFile https://github.com/alecthomas/gometalinter/releases/download/v2.0.12/gometalinter-2.0.12-windows-amd64.zip
diff --git a/vendor/github.com/Microsoft/hcsshim/container.go b/vendor/github.com/Microsoft/hcsshim/container.go
index 53c0a3854..7205a62c5 100644
--- a/vendor/github.com/Microsoft/hcsshim/container.go
+++ b/vendor/github.com/Microsoft/hcsshim/container.go
@@ -196,7 +196,7 @@ func (container *container) MappedVirtualDisks() (map[int]MappedVirtualDiskContr
// CreateProcess launches a new process within the container.
func (container *container) CreateProcess(c *ProcessConfig) (Process, error) {
- p, err := container.system.CreateProcessNoStdio(c)
+ p, err := container.system.CreateProcess(context.Background(), c)
if err != nil {
return nil, convertSystemError(err, container)
}
diff --git a/vendor/github.com/Microsoft/hcsshim/go.mod b/vendor/github.com/Microsoft/hcsshim/go.mod
index 5f76d444d..72d253dad 100644
--- a/vendor/github.com/Microsoft/hcsshim/go.mod
+++ b/vendor/github.com/Microsoft/hcsshim/go.mod
@@ -1,6 +1,6 @@
module github.com/Microsoft/hcsshim
-go 1.12
+go 1.13
require (
github.com/Microsoft/go-winio v0.4.15-0.20190919025122-fc70bd9a86b5
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/process.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/process.go
index d366f629f..2ad978f29 100644
--- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/process.go
+++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/process.go
@@ -20,6 +20,8 @@ type Process struct {
handle vmcompute.HcsProcess
processID int
system *System
+ hasCachedStdio bool
+ stdioLock sync.Mutex
stdin io.WriteCloser
stdout io.ReadCloser
stderr io.ReadCloser
@@ -272,8 +274,8 @@ func (process *Process) ExitCode() (int, error) {
}
// StdioLegacy returns the stdin, stdout, and stderr pipes, respectively. Closing
-// these pipes does not close the underlying pipes; but this function can only
-// be called once on each Process.
+// these pipes does not close the underlying pipes. Once returned, these pipes
+// are the responsibility of the caller to close.
func (process *Process) StdioLegacy() (_ io.WriteCloser, _ io.ReadCloser, _ io.ReadCloser, err error) {
operation := "hcsshim::Process::StdioLegacy"
ctx, span := trace.StartSpan(context.Background(), operation)
@@ -290,6 +292,15 @@ func (process *Process) StdioLegacy() (_ io.WriteCloser, _ io.ReadCloser, _ io.R
return nil, nil, nil, makeProcessError(process, operation, ErrAlreadyClosed, nil)
}
+ process.stdioLock.Lock()
+ defer process.stdioLock.Unlock()
+ if process.hasCachedStdio {
+ stdin, stdout, stderr := process.stdin, process.stdout, process.stderr
+ process.stdin, process.stdout, process.stderr = nil, nil, nil
+ process.hasCachedStdio = false
+ return stdin, stdout, stderr, nil
+ }
+
processInfo, resultJSON, err := vmcompute.HcsGetProcessInfo(ctx, process.handle)
events := processHcsResult(ctx, resultJSON)
if err != nil {
@@ -307,6 +318,8 @@ func (process *Process) StdioLegacy() (_ io.WriteCloser, _ io.ReadCloser, _ io.R
// Stdio returns the stdin, stdout, and stderr pipes, respectively.
// To close them, close the process handle.
func (process *Process) Stdio() (stdin io.Writer, stdout, stderr io.Reader) {
+ process.stdioLock.Lock()
+ defer process.stdioLock.Unlock()
return process.stdin, process.stdout, process.stderr
}
@@ -340,9 +353,13 @@ func (process *Process) CloseStdin(ctx context.Context) error {
return makeProcessError(process, operation, err, events)
}
+ process.stdioLock.Lock()
if process.stdin != nil {
process.stdin.Close()
+ process.stdin = nil
}
+ process.stdioLock.Unlock()
+
return nil
}
@@ -365,15 +382,20 @@ func (process *Process) Close() (err error) {
return nil
}
+ process.stdioLock.Lock()
if process.stdin != nil {
process.stdin.Close()
+ process.stdin = nil
}
if process.stdout != nil {
process.stdout.Close()
+ process.stdout = nil
}
if process.stderr != nil {
process.stderr.Close()
+ process.stderr = nil
}
+ process.stdioLock.Unlock()
if err = process.unregisterCallback(ctx); err != nil {
return makeProcessError(process, operation, err, nil)
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/system.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/system.go
index 98df25bd5..6300a7974 100644
--- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/system.go
+++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/system.go
@@ -482,38 +482,6 @@ func (computeSystem *System) createProcess(ctx context.Context, operation string
return newProcess(processHandle, int(processInfo.ProcessId), computeSystem), &processInfo, nil
}
-// CreateProcessNoStdio launches a new process within the computeSystem. The
-// Stdio handles are not cached on the process struct.
-func (computeSystem *System) CreateProcessNoStdio(c interface{}) (_ cow.Process, err error) {
- operation := "hcsshim::System::CreateProcessNoStdio"
- ctx, span := trace.StartSpan(context.Background(), operation)
- defer span.End()
- defer func() { oc.SetSpanStatus(span, err) }()
- span.AddAttributes(trace.StringAttribute("cid", computeSystem.id))
-
- process, processInfo, err := computeSystem.createProcess(ctx, operation, c)
- if err != nil {
- return nil, err
- }
- defer func() {
- if err != nil {
- process.Close()
- }
- }()
-
- // We don't do anything with these handles. Close them so they don't leak.
- syscall.Close(processInfo.StdInput)
- syscall.Close(processInfo.StdOutput)
- syscall.Close(processInfo.StdError)
-
- if err = process.registerCallback(ctx); err != nil {
- return nil, makeSystemError(computeSystem, operation, "", err, nil)
- }
- go process.waitBackground()
-
- return process, nil
-}
-
// CreateProcess launches a new process within the computeSystem.
func (computeSystem *System) CreateProcess(ctx context.Context, c interface{}) (cow.Process, error) {
operation := "hcsshim::System::CreateProcess"
@@ -534,6 +502,7 @@ func (computeSystem *System) CreateProcess(ctx context.Context, c interface{}) (
process.stdin = pipes[0]
process.stdout = pipes[1]
process.stderr = pipes[2]
+ process.hasCachedStdio = true
if err = process.registerCallback(ctx); err != nil {
return nil, makeSystemError(computeSystem, operation, "", err, nil)
diff --git a/vendor/github.com/Microsoft/hcsshim/osversion/windowsbuilds.go b/vendor/github.com/Microsoft/hcsshim/osversion/windowsbuilds.go
index 3488cc451..726d1c8c1 100644
--- a/vendor/github.com/Microsoft/hcsshim/osversion/windowsbuilds.go
+++ b/vendor/github.com/Microsoft/hcsshim/osversion/windowsbuilds.go
@@ -21,7 +21,7 @@ const (
// 2019 (ltsc2019), and Windows 10 (October 2018 Update).
RS5 = 17763
- // V19H1 (version 1903) corresponds to Windows Sever 1903 (semi-annual
+ // V19H1 (version 1903) corresponds to Windows Server 1903 (semi-annual
// channel).
V19H1 = 18362
)
diff --git a/vendor/github.com/containers/buildah/CHANGELOG.md b/vendor/github.com/containers/buildah/CHANGELOG.md
index b41ff8350..a0baf30e9 100644
--- a/vendor/github.com/containers/buildah/CHANGELOG.md
+++ b/vendor/github.com/containers/buildah/CHANGELOG.md
@@ -2,6 +2,46 @@
# Changelog
+## v1.11.6 (2019-12-03)
+ Handle missing equal sign in --from and --chown flags for COPY/ADD
+ bud COPY does not download URL
+ Bump github.com/onsi/gomega from 1.7.0 to 1.7.1
+ Fix .dockerignore exclude regression
+ Ran buildah through codespell
+ commit(docker): always set ContainerID and ContainerConfig
+ Touch up commit man page image parameter
+ Add builder identity annotations.
+ info: use util.Runtime()
+ Bump github.com/onsi/ginkgo from 1.10.2 to 1.10.3
+ Bump back to v1.12.0-dev
+
+## v1.11.5 (2019-11-11)
+ Enhance error on unsafe symbolic link targets
+ Add OCIRuntime to info
+ Check nonexistent authfile
+ Only output image id if running buildah bud --quiet
+ Fix --pull=true||false and add --pull-never to bud and from (retry)
+ cgroups v2: tweak or skip tests
+ Prepwork: new 'skip' helpers for tests
+ Handle configuration blobs for manifest lists
+ unmarshalConvertedConfig: avoid using the updated image's ref
+ Add completions for Manifest commands
+ Add disableFips option to secrets pkg
+ Update bud.bats test archive test
+ Add test for caching based on content digest
+ Builder.untarPath(): always evaluate b.ContentDigester.Hash()
+ Bump github.com/onsi/ginkgo from 1.10.1 to 1.10.2
+ Fix another broken test: copy-url-mtime
+ yet more fixes
+ Actual bug fix for 'add' test: fix the expected mode
+ BATS tests - lots of mostly minor cleanup
+ build: drop support for ostree
+ Add support for make vendor-in-container
+ imgtype: exit with error if storage fails
+ remove XDG_RUNTIME_DIR from default authfile path
+ fix troubleshooting redirect instructions
+ Bump back to v1.12.0-dev
+
## v1.11.4 (2019-10-28)
buildah: add a "manifest" command
manifests: add the module
diff --git a/vendor/github.com/containers/buildah/add.go b/vendor/github.com/containers/buildah/add.go
index bd3d25cd4..b5119e369 100644
--- a/vendor/github.com/containers/buildah/add.go
+++ b/vendor/github.com/containers/buildah/add.go
@@ -299,7 +299,9 @@ func (b *Builder) addHelper(excludes *fileutils.PatternMatcher, extract bool, de
}
}
logrus.Debugf("copying[%d] %q to %q", n, esrc+string(os.PathSeparator)+"*", dest+string(os.PathSeparator)+"*")
- if excludes == nil || !excludes.Exclusions() {
+
+ // Copy the whole directory because we do not exclude anything
+ if excludes == nil {
if err = copyWithTar(esrc, dest); err != nil {
return errors.Wrapf(err, "error copying %q to %q", esrc, dest)
}
@@ -309,13 +311,22 @@ func (b *Builder) addHelper(excludes *fileutils.PatternMatcher, extract bool, de
if err != nil {
return err
}
- skip, err := excludes.Matches(path)
+
+ res, err := excludes.MatchesResult(path)
if err != nil {
return errors.Wrapf(err, "error checking if %s is an excluded path", path)
}
- if skip {
+	// Skip the whole directory if it matched a pattern and no
+	// exclusion patterns were involved
+ if res.Excludes() == 0 && res.Matches() == 1 && info.IsDir() {
+ return filepath.SkipDir
+ }
+ // The latest match result has the highest priority,
+ // which means that we only skip the filepath if
+ // the last result matched.
+ if res.IsMatched() {
return nil
}
+
// combine the source's basename with the dest directory
fpath, err := filepath.Rel(esrc, path)
if err != nil {
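
The add.go hunk above replaces a boolean Matches() with a richer MatchesResult() whose last matching pattern wins; that is what lets a directory be skipped wholesale when only plain (non-exclusion) patterns hit it. Below is a minimal standalone sketch of that last-match-wins evaluation; the types and the crude prefix matcher are hypothetical stand-ins for the real fileutils.PatternMatcher, not the vendored code.

// Sketch only: illustrates last-match-wins pattern evaluation.
package main

import (
	"fmt"
	"strings"
)

// matchResult mimics what a MatchesResult-style call might report.
type matchResult struct {
	matches  int  // plain patterns that matched
	excludes int  // "!"-prefixed (exclusion) patterns that matched
	matched  bool // outcome of the last pattern that matched
}

// evaluate walks the patterns in order; the final hit decides the outcome.
func evaluate(path string, patterns []string) matchResult {
	var res matchResult
	for _, p := range patterns {
		negate := strings.HasPrefix(p, "!")
		pat := strings.TrimPrefix(p, "!")
		if strings.HasPrefix(path, pat) { // crude prefix match for the sketch
			if negate {
				res.excludes++
				res.matched = false
			} else {
				res.matches++
				res.matched = true
			}
		}
	}
	return res
}

func main() {
	patterns := []string{"docs", "!docs/keep.md"}
	for _, p := range []string{"docs/skip.md", "docs/keep.md", "src/main.go"} {
		res := evaluate(p, patterns)
		fmt.Printf("%-14s matched=%v (matches=%d excludes=%d)\n",
			p, res.matched, res.matches, res.excludes)
	}
}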
diff --git a/vendor/github.com/containers/buildah/buildah.go b/vendor/github.com/containers/buildah/buildah.go
index 7042590d1..bceafc241 100644
--- a/vendor/github.com/containers/buildah/buildah.go
+++ b/vendor/github.com/containers/buildah/buildah.go
@@ -27,7 +27,7 @@ const (
Package = "buildah"
// Version for the Package. Bump version in contrib/rpm/buildah.spec
// too.
- Version = "1.11.5"
+ Version = "1.11.6"
// The value we use to identify what type of information, currently a
// serialized Builder structure, we are using as per-container state.
// This should only be changed when we make incompatible changes to
@@ -325,7 +325,7 @@ type CommonBuildOptions struct {
ShmSize string
// Ulimit specifies resource limit options, in the form type:softlimit[:hardlimit].
// These types are recognized:
- // "core": maximimum core dump size (ulimit -c)
+ // "core": maximum core dump size (ulimit -c)
// "cpu": maximum CPU time (ulimit -t)
// "data": maximum size of a process's data segment (ulimit -d)
// "fsize": maximum size of new files (ulimit -f)
diff --git a/vendor/github.com/containers/buildah/changelog.txt b/vendor/github.com/containers/buildah/changelog.txt
index 098e6d6d4..58d784e35 100644
--- a/vendor/github.com/containers/buildah/changelog.txt
+++ b/vendor/github.com/containers/buildah/changelog.txt
@@ -1,3 +1,16 @@
+- Changelog for v1.11.6 (2019-12-03)
+ * Handle missing equal sign in --from and --chown flags for COPY/ADD
+ * bud COPY does not download URL
+ * Bump github.com/onsi/gomega from 1.7.0 to 1.7.1
+ * Fix .dockerignore exclude regression
+ * Ran buildah through codespell
+ * commit(docker): always set ContainerID and ContainerConfig
+ * Touch up commit man page image parameter
+ * Add builder identity annotations.
+ * info: use util.Runtime()
+ * Bump github.com/onsi/ginkgo from 1.10.2 to 1.10.3
+ * Bump back to v1.12.0-dev
+
- Changelog for v1.11.5 (2019-11-11)
* Enhance error on unsafe symbolic link targets
* Add OCIRuntime to info
diff --git a/vendor/github.com/containers/buildah/commit.go b/vendor/github.com/containers/buildah/commit.go
index 4df3b9908..24642f4dc 100644
--- a/vendor/github.com/containers/buildah/commit.go
+++ b/vendor/github.com/containers/buildah/commit.go
@@ -29,6 +29,13 @@ import (
"github.com/sirupsen/logrus"
)
+const (
+ // BuilderIdentityAnnotation is the name of the annotation key containing
+ // the name and version of the producer of the image stored as an
+ // annotation on commit.
+ BuilderIdentityAnnotation = "io.buildah.version"
+)
+
// CommitOptions can be used to alter how an image is committed.
type CommitOptions struct {
// PreferredManifestType is the preferred type of image manifest. The
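
The commit.go hunk adds a constant for the "io.buildah.version" annotation key. A minimal sketch of how a commit path might stamp that annotation onto an image's annotation map follows; the manifest is simplified to a plain map here, and whether the stored value is the bare version string or also includes the package name is an assumption of this sketch.

package main

import "fmt"

const (
	version = "1.11.6"
	// builderIdentityAnnotation mirrors the constant added above.
	builderIdentityAnnotation = "io.buildah.version"
)

// annotate records which tool version produced the image.
func annotate(annotations map[string]string) map[string]string {
	if annotations == nil {
		annotations = map[string]string{}
	}
	annotations[builderIdentityAnnotation] = version
	return annotations
}

func main() {
	m := annotate(nil)
	fmt.Println(m) // map[io.buildah.version:1.11.6]
}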
diff --git a/vendor/github.com/containers/buildah/go.mod b/vendor/github.com/containers/buildah/go.mod
index 2c76c46bf..684b00ff5 100644
--- a/vendor/github.com/containers/buildah/go.mod
+++ b/vendor/github.com/containers/buildah/go.mod
@@ -6,7 +6,7 @@ require (
github.com/blang/semver v3.5.0+incompatible // indirect
github.com/containernetworking/cni v0.7.1
github.com/containers/image/v5 v5.0.0
- github.com/containers/storage v1.13.5
+ github.com/containers/storage v1.14.0
github.com/cyphar/filepath-securejoin v0.2.2
github.com/docker/distribution v2.7.1+incompatible
github.com/docker/docker-credential-helpers v0.6.1 // indirect
@@ -21,11 +21,11 @@ require (
github.com/ishidawataru/sctp v0.0.0-20180918013207-6e2cb1366111 // indirect
github.com/mattn/go-shellwords v1.0.6
github.com/morikuni/aec v1.0.0 // indirect
- github.com/onsi/ginkgo v1.10.2
- github.com/onsi/gomega v1.7.0
+ github.com/onsi/ginkgo v1.10.3
+ github.com/onsi/gomega v1.7.1
github.com/opencontainers/go-digest v1.0.0-rc1
github.com/opencontainers/image-spec v1.0.2-0.20190823105129-775207bd45b6
- github.com/opencontainers/runc v1.0.0-rc8.0.20190827142921-dd075602f158
+ github.com/opencontainers/runc v1.0.0-rc9
github.com/opencontainers/runtime-spec v0.1.2-0.20190618234442-a950415649c7
github.com/opencontainers/runtime-tools v0.9.0
github.com/opencontainers/selinux v1.3.0
diff --git a/vendor/github.com/containers/buildah/go.sum b/vendor/github.com/containers/buildah/go.sum
index 15dab4794..1cce3ff7e 100644
--- a/vendor/github.com/containers/buildah/go.sum
+++ b/vendor/github.com/containers/buildah/go.sum
@@ -60,6 +60,12 @@ github.com/containers/storage v1.13.4 h1:j0bBaJDKbUHtAW1MXPFnwXJtqcH+foWeuXK1YaB
github.com/containers/storage v1.13.4/go.mod h1:6D8nK2sU9V7nEmAraINRs88ZEscM5C5DK+8Npp27GeA=
github.com/containers/storage v1.13.5 h1:/SUzGeOP2HDijpF7Yur21Ch6WTZC1BNeZF917CWcp5c=
github.com/containers/storage v1.13.5/go.mod h1:HELz8Sn+UVbPaUZMI8RvIG9doD4y4z6Gtg4k7xdd2ZY=
+github.com/containers/storage v1.13.6-0.20191016135324-ed4762ae6c66 h1:b/loDwYh+0nIA/9su3SI4kcYaYKtPe74EFYe/Uew6RE=
+github.com/containers/storage v1.13.6-0.20191016135324-ed4762ae6c66/go.mod h1:imKnA8Ozb99yPWt64WPrtNOR0v0HKQZFH4oLV45N22k=
+github.com/containers/storage v1.13.6-0.20191017175359-7daeec89a243 h1:k97CWHLLrJWEKPX3a3uCtj7QClyVC+aBFSGeswKRLFg=
+github.com/containers/storage v1.13.6-0.20191017175359-7daeec89a243/go.mod h1:imKnA8Ozb99yPWt64WPrtNOR0v0HKQZFH4oLV45N22k=
+github.com/containers/storage v1.14.0 h1:LbX6WZaDmkXt4DT4xWIg3YXAWd6oA4K9Fi6/KG1xt84=
+github.com/containers/storage v1.14.0/go.mod h1:qGPsti/qC1xxX+xcpHfiTMT+8ThVE2Jf83wFHHqkDAY=
github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk=
github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
@@ -187,6 +193,10 @@ github.com/klauspost/compress v1.7.2 h1:liMOoeIvFpr9kEvalrZ7VVBA4wGf7zfOgwBjzz/5
github.com/klauspost/compress v1.7.2/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
github.com/klauspost/compress v1.8.1 h1:oygt2ychZFHOB6M9gUgajzgKrwRgHbGC77NwA4COVgI=
github.com/klauspost/compress v1.8.1/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
+github.com/klauspost/compress v1.9.1 h1:TWy0o9J9c6LK9C8t7Msh6IAJNXbsU/nvKLTQUU5HdaY=
+github.com/klauspost/compress v1.9.1/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
+github.com/klauspost/compress v1.9.2 h1:LfVyl+ZlLlLDeQ/d2AqfGIIH4qEDu0Ed2S5GyhCWIWY=
+github.com/klauspost/compress v1.9.2/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
github.com/klauspost/cpuid v1.2.1 h1:vJi+O/nMdFt0vqm8NZBI6wzALWdA2X+egi0ogNyrC/w=
github.com/klauspost/cpuid v1.2.1/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
github.com/klauspost/pgzip v1.2.1 h1:oIPZROsWuPHpOdMVWLuJZXwgjhrW8r1yEX8UqMyeNHM=
@@ -239,11 +249,15 @@ github.com/onsi/ginkgo v1.10.1 h1:q/mM8GF/n0shIN8SaAZ0V+jnLPzen6WIVZdiwrRlMlo=
github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.10.2 h1:uqH7bpe+ERSiDa34FDOF7RikN6RzXgduUF8yarlZp94=
github.com/onsi/ginkgo v1.10.2/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.10.3 h1:OoxbjfXVZyod1fmWYhI7SEyaD8B00ynP3T+D5GiyHOY=
+github.com/onsi/ginkgo v1.10.3/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
github.com/onsi/gomega v1.5.0 h1:izbySO9zDPmjJ8rDjLvkA2zJHIo+HkYXHnf7eN7SSyo=
github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/onsi/gomega v1.7.0 h1:XPnZz8VVBHjVsy1vzJmRwIcSwiUO+JFfrv/xGiigmME=
github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
+github.com/onsi/gomega v1.7.1 h1:K0jcRCwNQM3vFGh1ppMtDh/+7ApJrjldlX8fA0jDTLQ=
+github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
github.com/opencontainers/go-digest v1.0.0-rc1 h1:WzifXhOVOEOuFYOJAW6aQqW0TooG2iki3E3Ii+WN7gQ=
github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
@@ -257,6 +271,8 @@ github.com/opencontainers/runc v1.0.0-rc8 h1:dDCFes8Hj1r/i5qnypONo5jdOme/8HWZC/a
github.com/opencontainers/runc v1.0.0-rc8/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
github.com/opencontainers/runc v1.0.0-rc8.0.20190827142921-dd075602f158 h1:/A6bAdnSZoTQmKml3MdHAnSEPnBAQeigNBl4sxnfaaQ=
github.com/opencontainers/runc v1.0.0-rc8.0.20190827142921-dd075602f158/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
+github.com/opencontainers/runc v1.0.0-rc9 h1:/k06BMULKF5hidyoZymkoDCzdJzltZpz/UU4LguQVtc=
+github.com/opencontainers/runc v1.0.0-rc9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
github.com/opencontainers/runtime-spec v0.1.2-0.20190618234442-a950415649c7 h1:Dliu5QO+4JYWu/yMshaMU7G3JN2POGpwjJN7gjy10Go=
github.com/opencontainers/runtime-spec v0.1.2-0.20190618234442-a950415649c7/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
@@ -303,6 +319,24 @@ github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDa
github.com/prometheus/procfs v0.0.5 h1:3+auTFlqw+ZaQYJARz6ArODtkaIwtvBTx3N2NehQlL8=
github.com/prometheus/procfs v0.0.5/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ=
github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
+github.com/saschagrunert/storage v1.12.3-0.20191015073819-a34ddea087da h1:5aEGhStFh+0r/t0kT0utSi5C6MIMHBgMHkeIu1JUvfA=
+github.com/saschagrunert/storage v1.12.3-0.20191015073819-a34ddea087da/go.mod h1:imKnA8Ozb99yPWt64WPrtNOR0v0HKQZFH4oLV45N22k=
+github.com/saschagrunert/storage v1.12.3-0.20191018073047-1d43d5290f84 h1:iBs6FOO2GpFpdaa3WC4XhqHI6S2LE7RTlgn8LodsXVo=
+github.com/saschagrunert/storage v1.12.3-0.20191018073047-1d43d5290f84/go.mod h1:imKnA8Ozb99yPWt64WPrtNOR0v0HKQZFH4oLV45N22k=
+github.com/saschagrunert/storage v1.12.3-0.20191018074207-004188d8ee96 h1:hDio2zc3wMjwSPmUEXSz7lnFeKvP/537/hoEh/5QUls=
+github.com/saschagrunert/storage v1.12.3-0.20191018074207-004188d8ee96/go.mod h1:imKnA8Ozb99yPWt64WPrtNOR0v0HKQZFH4oLV45N22k=
+github.com/saschagrunert/storage v1.12.3-0.20191018074751-2a78ca44fc55 h1:WMsV+abtQGrEahhpTh4RR3q/mdMN3EyJihJzt0x86SY=
+github.com/saschagrunert/storage v1.12.3-0.20191018074751-2a78ca44fc55/go.mod h1:imKnA8Ozb99yPWt64WPrtNOR0v0HKQZFH4oLV45N22k=
+github.com/saschagrunert/storage v1.12.3-0.20191018080359-fa072a5579b2 h1:a2UZl3C4vVtqfIZHAnRSgaa9vs9EjTEpcJES0O3gWqM=
+github.com/saschagrunert/storage v1.12.3-0.20191018080359-fa072a5579b2/go.mod h1:imKnA8Ozb99yPWt64WPrtNOR0v0HKQZFH4oLV45N22k=
+github.com/saschagrunert/storage v1.12.3-0.20191113150726-1d1b91a958a6 h1:6hOuOZqXF7MTt/a44ZWBpLwBrrc+PPs43wh5LW3p3gs=
+github.com/saschagrunert/storage v1.12.3-0.20191113150726-1d1b91a958a6/go.mod h1:apitPTJaaw4MMr0U+Z3WwpX86dwUMOlV/lp0NgZhXTU=
+github.com/saschagrunert/storage v1.12.3-0.20191113151852-f8b56918440b h1:Quf1YA+T4xhABFYYMN/ORBGAYa4WLD2O/cX/NPmoOgc=
+github.com/saschagrunert/storage v1.12.3-0.20191113151852-f8b56918440b/go.mod h1:apitPTJaaw4MMr0U+Z3WwpX86dwUMOlV/lp0NgZhXTU=
+github.com/saschagrunert/storage v1.12.3-0.20191114093559-52adfaa6f31e h1:iX1xFl6TYGIIVcW9xR0OvXrH9dJ69MpIzRt4dc6v1u0=
+github.com/saschagrunert/storage v1.12.3-0.20191114093559-52adfaa6f31e/go.mod h1:apitPTJaaw4MMr0U+Z3WwpX86dwUMOlV/lp0NgZhXTU=
+github.com/saschagrunert/storage v1.12.3-0.20191116170926-5e07044cf0e2 h1:azd4fIVaZqFbBcgbMSuP9YyskvNwRdiV+SO2Z1qJfA8=
+github.com/saschagrunert/storage v1.12.3-0.20191116170926-5e07044cf0e2/go.mod h1:apitPTJaaw4MMr0U+Z3WwpX86dwUMOlV/lp0NgZhXTU=
github.com/seccomp/containers-golang v0.0.0-20180629143253-cdfdaa7543f4 h1:rOG9oHVIndNR14f3HRyBy9UPQYmIPniWqTU1TDdHhq4=
github.com/seccomp/containers-golang v0.0.0-20180629143253-cdfdaa7543f4/go.mod h1:f/98/SnvAzhAEFQJ3u836FePXvcbE8BS0YGMQNn4mhA=
github.com/seccomp/libseccomp-golang v0.9.1 h1:NJjM5DNFOs0s3kYE1WUOr6G8V97sdt46rlXTMfXGWBo=
@@ -463,6 +497,8 @@ gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWD
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.4 h1:/eiJrUcujPVeJ3xlSWaiNi3uSVmDGBK1pDHUHAnao1I=
+gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gotest.tools v0.0.0-20190624233834-05ebafbffc79/go.mod h1:R//lfYlUuTOTfblYI3lGoAAAebUdzjvbmQsuB7Ykd90=
gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo=
gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw=
diff --git a/vendor/github.com/containers/buildah/image.go b/vendor/github.com/containers/buildah/image.go
index 79c75ce0b..d333442b8 100644
--- a/vendor/github.com/containers/buildah/image.go
+++ b/vendor/github.com/containers/buildah/image.go
@@ -190,6 +190,10 @@ func (i *containerImageRef) createConfigsAndManifests() (v1.Image, v1.Manifest,
return v1.Image{}, v1.Manifest{}, docker.V2Image{}, docker.V2S2Manifest{}, err
}
dimage.Parent = docker.ID(i.parent)
+ dimage.Container = i.containerID
+ if dimage.Config != nil {
+ dimage.ContainerConfig = *dimage.Config
+ }
// Always replace this value, since we're newer than our base image.
dimage.Created = created
// Clear the list of diffIDs, since we always repopulate it.
@@ -455,7 +459,6 @@ func (i *containerImageRef) NewImageSource(ctx context.Context, sc *types.System
}
dimage.History = append(dimage.History, dnews)
appendHistory(i.postEmptyLayers)
- dimage.Parent = docker.ID(i.parent)
// Sanity check that we didn't just create a mismatch between non-empty layers in the
// history and the number of diffIDs.
diff --git a/vendor/github.com/containers/buildah/imagebuildah/stage_executor.go b/vendor/github.com/containers/buildah/imagebuildah/stage_executor.go
index 67c0a0eb9..311031a95 100644
--- a/vendor/github.com/containers/buildah/imagebuildah/stage_executor.go
+++ b/vendor/github.com/containers/buildah/imagebuildah/stage_executor.go
@@ -248,7 +248,7 @@ func (s *StageExecutor) volumeCacheRestore() error {
return nil
}
-// digestContent digests any content that this next instruction would add to
+// digestSpecifiedContent digests any content that this next instruction would add to
// the image, returning the digester if there is any, or nil otherwise. We
// don't care about the details of where in the filesystem the content actually
// goes, because we're not actually going to add it here, so this is less
@@ -424,37 +424,37 @@ func (s *StageExecutor) Copy(excludes []string, copies ...imagebuilder.Copy) err
for _, src := range copy.Src {
if strings.HasPrefix(src, "http://") || strings.HasPrefix(src, "https://") {
// Source is a URL.
- sources = append(sources, src)
- } else {
- // Treat the source, which is not a URL, as a
- // location relative to the
- // all-content-comes-from-below-this-directory
- // directory.
- srcSecure, err := securejoin.SecureJoin(contextDir, src)
- if err != nil {
- return errors.Wrapf(err, "forbidden path for %q, it is outside of the build context %q", src, contextDir)
- }
- if hadFinalPathSeparator {
- // If destination is a folder, we need to take extra care to
- // ensure that files are copied with correct names (since
- // resolving a symlink may result in a different name).
- _, srcName := filepath.Split(src)
- _, srcNameSecure := filepath.Split(srcSecure)
- if srcName != srcNameSecure {
- options := buildah.AddAndCopyOptions{
- Chown: copy.Chown,
- ContextDir: contextDir,
- Excludes: copyExcludes,
- IDMappingOptions: idMappingOptions,
- }
- if err := s.builder.Add(filepath.Join(copy.Dest, srcName), copy.Download, options, srcSecure); err != nil {
- return err
- }
- continue
+ // returns an error to be compatible with docker
+ return errors.Errorf("source can't be a URL for COPY")
+ }
+ // Treat the source, which is not a URL, as a
+ // location relative to the
+ // all-content-comes-from-below-this-directory
+ // directory.
+ srcSecure, err := securejoin.SecureJoin(contextDir, src)
+ if err != nil {
+ return errors.Wrapf(err, "forbidden path for %q, it is outside of the build context %q", src, contextDir)
+ }
+ if hadFinalPathSeparator {
+ // If destination is a folder, we need to take extra care to
+ // ensure that files are copied with correct names (since
+ // resolving a symlink may result in a different name).
+ _, srcName := filepath.Split(src)
+ _, srcNameSecure := filepath.Split(srcSecure)
+ if srcName != srcNameSecure {
+ options := buildah.AddAndCopyOptions{
+ Chown: copy.Chown,
+ ContextDir: contextDir,
+ Excludes: copyExcludes,
+ IDMappingOptions: idMappingOptions,
+ }
+ if err := s.builder.Add(filepath.Join(copy.Dest, srcName), copy.Download, options, srcSecure); err != nil {
+ return err
}
+ continue
}
- sources = append(sources, srcSecure)
}
+ sources = append(sources, srcSecure)
}
options := buildah.AddAndCopyOptions{
Chown: copy.Chown,
@@ -816,14 +816,22 @@ func (s *StageExecutor) Execute(ctx context.Context, stage imagebuilder.Stage, b
// Check if there's a --from if the step command is COPY or
// ADD. Set copyFrom to point to either the context directory
// or the root of the container from the specified stage.
+ // Also check the chown flag for validity.
s.copyFrom = s.executor.contextDir
- for _, n := range step.Flags {
+ for _, flag := range step.Flags {
command := strings.ToUpper(step.Command)
- if strings.Contains(n, "--from") && (command == "COPY" || command == "ADD") {
+ // chown and from flags should have an '=' sign, '--chown=' or '--from='
+ if command == "COPY" && (flag == "--chown" || flag == "--from") {
+ return "", nil, errors.Errorf("COPY only supports the --chown=<uid:gid> and the --from=<image|stage> flags")
+ }
+ if command == "ADD" && flag == "--chown" {
+ return "", nil, errors.Errorf("ADD only supports the --chown=<uid:gid> flag")
+ }
+ if strings.Contains(flag, "--from") && command == "COPY" {
var mountPoint string
- arr := strings.Split(n, "=")
+ arr := strings.Split(flag, "=")
if len(arr) != 2 {
- return "", nil, errors.Errorf("%s: invalid --from flag, should be --from=<name|index>", command)
+ return "", nil, errors.Errorf("%s: invalid --from flag, should be --from=<name|stage>", command)
}
otherStage, ok := s.executor.stages[arr[1]]
if !ok {
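
The stage_executor.go hunk above rejects a bare "--from" or "--chown" (missing the '=' sign) before any value parsing happens, and then splits the valid form on '='. A self-contained sketch of that validation, mirroring the error messages in the hunk but standing apart from the vendored function:

package main

import (
	"fmt"
	"strings"
)

// validateFlag rejects bare --from/--chown and extracts the --from value.
func validateFlag(command, flag string) (string, error) {
	command = strings.ToUpper(command)
	if command == "COPY" && (flag == "--chown" || flag == "--from") {
		return "", fmt.Errorf("COPY only supports the --chown=<uid:gid> and the --from=<image|stage> flags")
	}
	if command == "ADD" && flag == "--chown" {
		return "", fmt.Errorf("ADD only supports the --chown=<uid:gid> flag")
	}
	if command == "COPY" && strings.Contains(flag, "--from") {
		arr := strings.Split(flag, "=")
		if len(arr) != 2 {
			return "", fmt.Errorf("%s: invalid --from flag, should be --from=<name|stage>", command)
		}
		return arr[1], nil // the stage name or index
	}
	return "", nil
}

func main() {
	for _, f := range []string{"--from", "--from=builder", "--chown"} {
		stage, err := validateFlag("copy", f)
		fmt.Printf("%-16s stage=%q err=%v\n", f, stage, err)
	}
}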
diff --git a/vendor/github.com/containers/buildah/info.go b/vendor/github.com/containers/buildah/info.go
index 322bd8834..68d217b8f 100644
--- a/vendor/github.com/containers/buildah/info.go
+++ b/vendor/github.com/containers/buildah/info.go
@@ -13,6 +13,7 @@ import (
"github.com/containers/buildah/pkg/cgroups"
"github.com/containers/buildah/pkg/unshare"
+ "github.com/containers/buildah/util"
"github.com/containers/storage"
"github.com/containers/storage/pkg/system"
"github.com/sirupsen/logrus"
@@ -52,10 +53,9 @@ func hostInfo() map[string]interface{} {
logrus.Error(err, "err reading cgroups mode")
}
cgroupVersion := "v1"
- ociruntime := "runc"
+ ociruntime := util.Runtime()
if unified {
cgroupVersion = "v2"
- ociruntime = "crun"
}
info["CgroupVersion"] = cgroupVersion
info["OCIRuntime"] = ociruntime
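
The info.go hunk swaps the hard-coded "crun on cgroups v2" guess for util.Runtime(). The sketch below is only one plausible shape for such a helper, assuming an environment-variable override with the removed heuristic as fallback; the BUILDAH_RUNTIME name and the fallback logic are assumptions, not the vendored implementation.

package main

import (
	"fmt"
	"os"
)

// runtimeName: honor an explicit override, otherwise fall back to the old
// heuristic the hunk above removes (crun on cgroups v2, runc elsewhere).
func runtimeName(cgroupsV2 bool) string {
	if rt := os.Getenv("BUILDAH_RUNTIME"); rt != "" {
		return rt
	}
	if cgroupsV2 {
		return "crun"
	}
	return "runc"
}

func main() {
	fmt.Println(runtimeName(false)) // "runc" unless BUILDAH_RUNTIME is set
}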
diff --git a/vendor/github.com/containers/buildah/install.md b/vendor/github.com/containers/buildah/install.md
index f533b0fb2..af340eb86 100644
--- a/vendor/github.com/containers/buildah/install.md
+++ b/vendor/github.com/containers/buildah/install.md
@@ -374,7 +374,7 @@ cat /etc/containers/policy.json
## Vendoring
-Buildah uses Go Modules for vendoring purposes. If you need to update or add a vendored package into Buildah, please follow this proceedure:
+Buildah uses Go Modules for vendoring purposes. If you need to update or add a vendored package into Buildah, please follow this procedure:
* Enter into your sandbox `src/github.com/containers/buildah` and ensure that the GOPATH variable is set to the directory prior as noted above.
* `export GO111MODULE=on`
* Assuming you want to 'bump' the `github.com/containers/storage` package to version 1.12.13, use this command: `go get github.com/containers/storage@v1.12.13`
diff --git a/vendor/github.com/containers/buildah/pkg/cli/common.go b/vendor/github.com/containers/buildah/pkg/cli/common.go
index 6f49cc240..60353bebb 100644
--- a/vendor/github.com/containers/buildah/pkg/cli/common.go
+++ b/vendor/github.com/containers/buildah/pkg/cli/common.go
@@ -194,7 +194,7 @@ func GetFromAndBudFlags(flags *FromAndBudResults, usernsResults *UserNSResults,
fs.StringSliceVar(&flags.DNSSearch, "dns-search", []string{}, "Set custom DNS search domains")
fs.StringSliceVar(&flags.DNSServers, "dns", []string{}, "Set custom DNS servers or disable it completely by setting it to 'none', which prevents the automatic creation of `/etc/resolv.conf`.")
fs.StringSliceVar(&flags.DNSOptions, "dns-option", []string{}, "Set custom DNS options")
- fs.BoolVar(&flags.HTTPProxy, "http-proxy", true, "pass thru HTTP Proxy environment variables")
+ fs.BoolVar(&flags.HTTPProxy, "http-proxy", true, "pass through HTTP Proxy environment variables")
fs.StringVar(&flags.Isolation, "isolation", DefaultIsolation(), "`type` of process isolation to use. Use BUILDAH_ISOLATION environment variable to override.")
fs.StringVarP(&flags.Memory, "memory", "m", "", "memory limit (format: <number>[<unit>], where unit = b, k, m or g)")
fs.StringVar(&flags.MemorySwap, "memory-swap", "", "swap limit equal to memory plus swap: '-1' to enable unlimited swap")
diff --git a/vendor/github.com/containers/buildah/pkg/parse/parse.go b/vendor/github.com/containers/buildah/pkg/parse/parse.go
index 9194ddf58..0ec6b5ee3 100644
--- a/vendor/github.com/containers/buildah/pkg/parse/parse.go
+++ b/vendor/github.com/containers/buildah/pkg/parse/parse.go
@@ -270,7 +270,7 @@ func getMounts(mounts []string) (map[string]specs.Mount, error) {
// TODO(vrothberg): the manual parsing can be replaced with a regular expression
// to allow a more robust parsing of the mount format and to give
- // precise errors regarding supported format versus suppored options.
+ // precise errors regarding supported format versus supported options.
for _, mount := range mounts {
arr := strings.SplitN(mount, ",", 2)
if len(arr) < 2 {
diff --git a/vendor/github.com/containers/storage/.cirrus.yml b/vendor/github.com/containers/storage/.cirrus.yml
index 619e077d0..e4b38947d 100644
--- a/vendor/github.com/containers/storage/.cirrus.yml
+++ b/vendor/github.com/containers/storage/.cirrus.yml
@@ -19,9 +19,11 @@ env:
####
# GCE project where images live
IMAGE_PROJECT: "libpod-218412"
- FEDORA_CACHE_IMAGE_NAME: "fedora-cloud-base-30-1-2-1556821664"
- PRIOR_FEDORA_CACHE_IMAGE_NAME: "fedora-cloud-base-29-1-2-1541789245"
- UBUNTU_CACHE_IMAGE_NAME: "ubuntu-1904-disco-v20190514"
+ _BUILT_IMAGE_SUFFIX: "libpod-6228273469587456"
+ FEDORA_CACHE_IMAGE_NAME: "fedora-31-${_BUILT_IMAGE_SUFFIX}"
+ PRIOR_FEDORA_CACHE_IMAGE_NAME: "fedora-30-${_BUILT_IMAGE_SUFFIX}"
+ UBUNTU_CACHE_IMAGE_NAME: "ubuntu-19-${_BUILT_IMAGE_SUFFIX}"
+ PRIOR_UBUNTU_CACHE_IMAGE_NAME: "ubuntu-18-${_BUILT_IMAGE_SUFFIX}"
####
#### Command variables to help avoid duplication
@@ -49,11 +51,14 @@ gce_instance:
image_name: "${FEDORA_CACHE_IMAGE_NAME}"
testing_task:
+ depends_on:
+ - lint
gce_instance: # Only need to specify differences from defaults (above)
matrix: # Duplicate this task for each matrix product.
image_name: "${FEDORA_CACHE_IMAGE_NAME}"
image_name: "${PRIOR_FEDORA_CACHE_IMAGE_NAME}"
image_name: "${UBUNTU_CACHE_IMAGE_NAME}"
+ # image_name: "${PRIOR_UBUNTU_CACHE_IMAGE_NAME}" # No fuse3 support
# Separate scripts for separate outputs, makes debugging easier.
setup_script: '${CIRRUS_WORKING_DIR}/${SCRIPT_BASE}/setup.sh |& ${_TIMESTAMP}'
@@ -99,6 +104,7 @@ meta_task:
${FEDORA_CACHE_IMAGE_NAME}
${PRIOR_FEDORA_CACHE_IMAGE_NAME}
${UBUNTU_CACHE_IMAGE_NAME}
+ ${PRIOR_UBUNTU_CACHE_IMAGE_NAME}
BUILDID: "${CIRRUS_BUILD_ID}"
REPOREF: "${CIRRUS_CHANGE_IN_REPO}"
GCPJSON: ENCRYPTED[244a93fe8b386b48b96f748342bf741350e43805eee81dd04b45093bdf737e540b993fc735df41f131835fa0f9b65826]
@@ -110,7 +116,7 @@ meta_task:
vendor_task:
container:
- image: golang:1.12
+ image: golang:1.13
modules_cache:
fingerprint_script: cat go.sum
folder: $GOPATH/pkg/mod
diff --git a/vendor/github.com/containers/storage/.gitignore b/vendor/github.com/containers/storage/.gitignore
index 1896698e4..64b9a98bd 100644
--- a/vendor/github.com/containers/storage/.gitignore
+++ b/vendor/github.com/containers/storage/.gitignore
@@ -2,6 +2,7 @@
# if you want to ignore files created by your editor/tools,
# please consider a global .gitignore https://help.github.com/articles/ignoring-files
*.1
+*.5
*.exe
*~
*.orig
diff --git a/vendor/github.com/containers/storage/.travis.yml b/vendor/github.com/containers/storage/.travis.yml
index dc1c61391..a7865729d 100644
--- a/vendor/github.com/containers/storage/.travis.yml
+++ b/vendor/github.com/containers/storage/.travis.yml
@@ -15,30 +15,21 @@ env:
- GO_VERSION="stable"
DISTRO="ubuntu"
- - GO_VERSION="1.11"
- DISTRO="ubuntu"
-
- - GO_VERSION="1.12"
+ - GO_VERSION="1.12.12"
DISTRO="ubuntu"
# Fedora
- GO_VERSION="stable"
DISTRO="fedora"
- - GO_VERSION="1.11"
- DISTRO="fedora"
-
- - GO_VERSION="1.12"
+ - GO_VERSION="1.12.12"
DISTRO="fedora"
# CentOS
- GO_VERSION="stable"
DISTRO="centos"
- - GO_VERSION="1.11"
- DISTRO="centos"
-
- - GO_VERSION="1.12"
+ - GO_VERSION="1.12.12"
DISTRO="centos"
# GO_VERSION="stable" builds successfully, but tests fail on all platforms.
diff --git a/vendor/github.com/containers/storage/Makefile b/vendor/github.com/containers/storage/Makefile
index 90e5ca499..83005307f 100644
--- a/vendor/github.com/containers/storage/Makefile
+++ b/vendor/github.com/containers/storage/Makefile
@@ -127,6 +127,9 @@ lint: install.tools
help: ## this help
@awk 'BEGIN {FS = ":.*?## "} /^[a-z A-Z_-]+:.*?## / {gsub(" ",",",$$1);gsub("\\\\n",sprintf("\n%22c"," "), $$2);printf "\033[36m%-21s\033[0m %s\n", $$1, $$2}' $(MAKEFILE_LIST)
+vendor-in-container:
+ podman run --privileged --rm --env HOME=/root -v `pwd`:/src -w /src golang make vendor
+
vendor:
export GO111MODULE=on \
$(GO) mod tidy && \
diff --git a/vendor/github.com/containers/storage/VERSION b/vendor/github.com/containers/storage/VERSION
index 43ded9062..42cf0675c 100644
--- a/vendor/github.com/containers/storage/VERSION
+++ b/vendor/github.com/containers/storage/VERSION
@@ -1 +1 @@
-1.13.5
+1.15.2
diff --git a/vendor/github.com/containers/storage/drivers/copy/copy_linux.go b/vendor/github.com/containers/storage/drivers/copy/copy_linux.go
index d614b78fc..c00b9e47d 100644
--- a/vendor/github.com/containers/storage/drivers/copy/copy_linux.go
+++ b/vendor/github.com/containers/storage/drivers/copy/copy_linux.go
@@ -16,6 +16,7 @@ import (
"io"
"os"
"path/filepath"
+ "strings"
"syscall"
"time"
@@ -97,7 +98,7 @@ func legacyCopy(srcFile io.Reader, dstFile io.Writer) error {
func copyXattr(srcPath, dstPath, attr string) error {
data, err := system.Lgetxattr(srcPath, attr)
- if err != nil {
+ if err != nil && err != unix.EOPNOTSUPP {
return err
}
if data != nil {
@@ -155,7 +156,7 @@ func DirCopy(srcDir, dstDir string, copyMode Mode, copyXattrs bool) error {
switch mode := f.Mode(); {
case mode.IsRegular():
- id := fileID{dev: stat.Dev, ino: stat.Ino}
+ id := fileID{dev: uint64(stat.Dev), ino: stat.Ino}
if copyMode == Hardlink {
isHardlink = true
if err2 := os.Link(srcPath, dstPath); err2 != nil {
@@ -271,6 +272,19 @@ func doCopyXattrs(srcPath, dstPath string) error {
return err
}
+ xattrs, err := system.Llistxattr(srcPath)
+ if err != nil && err != unix.EOPNOTSUPP {
+ return err
+ }
+
+ for _, key := range xattrs {
+ if strings.HasPrefix(key, "user.") {
+ if err := copyXattr(srcPath, dstPath, key); err != nil {
+ return err
+ }
+ }
+ }
+
// We need to copy this attribute if it appears in an overlay upper layer, as
// this function is used to copy those. It is set by overlay if a directory
// is removed and then re-created and should not inherit anything from the
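
The copy_linux.go hunk lists the source's extended attributes, keeps only the "user." namespace, and copies each one while tolerating filesystems without xattr support (EOPNOTSUPP). A hedged standalone version of that idea, using golang.org/x/sys/unix directly instead of the containers/storage system wrappers and with fixed-size buffers for brevity:

package main

import (
	"fmt"
	"strings"

	"golang.org/x/sys/unix"
)

func copyUserXattrs(src, dst string) error {
	buf := make([]byte, 64*1024)
	sz, err := unix.Llistxattr(src, buf)
	if err == unix.EOPNOTSUPP {
		return nil // source filesystem has no xattrs; nothing to do
	}
	if err != nil {
		return err
	}
	// The attribute list is a sequence of NUL-terminated names.
	for _, key := range strings.Split(strings.TrimRight(string(buf[:sz]), "\x00"), "\x00") {
		if !strings.HasPrefix(key, "user.") {
			continue
		}
		data := make([]byte, 64*1024)
		n, err := unix.Lgetxattr(src, key, data)
		if err != nil {
			return err
		}
		if err := unix.Lsetxattr(dst, key, data[:n], 0); err != nil {
			return err
		}
	}
	return nil
}

func main() {
	if err := copyUserXattrs("/tmp/a", "/tmp/b"); err != nil {
		fmt.Println("copy failed:", err)
	}
}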
diff --git a/vendor/github.com/containers/storage/drivers/driver_linux.go b/vendor/github.com/containers/storage/drivers/driver_linux.go
index a45f6b44c..dddf8a8b4 100644
--- a/vendor/github.com/containers/storage/drivers/driver_linux.go
+++ b/vendor/github.com/containers/storage/drivers/driver_linux.go
@@ -48,6 +48,8 @@ const (
FsMagicZfs = FsMagic(0x2fc12fc1)
// FsMagicOverlay filesystem id for overlay
FsMagicOverlay = FsMagic(0x794C7630)
+ // FsMagicFUSE filesystem id for FUSE
+ FsMagicFUSE = FsMagic(0x65735546)
)
var (
diff --git a/vendor/github.com/containers/storage/drivers/overlay/overlay.go b/vendor/github.com/containers/storage/drivers/overlay/overlay.go
index 97222fe7a..13cd65fa5 100644
--- a/vendor/github.com/containers/storage/drivers/overlay/overlay.go
+++ b/vendor/github.com/containers/storage/drivers/overlay/overlay.go
@@ -231,13 +231,18 @@ func Init(home string, options graphdriver.Options) (graphdriver.Driver, error)
}
}
+ fileSystemType := graphdriver.FsMagicOverlay
+ if opts.mountProgram != "" {
+ fileSystemType = graphdriver.FsMagicFUSE
+ }
+
d := &Driver{
name: "overlay",
home: home,
runhome: runhome,
uidMaps: options.UIDMaps,
gidMaps: options.GIDMaps,
- ctr: graphdriver.NewRefCounter(graphdriver.NewFsChecker(graphdriver.FsMagicOverlay)),
+ ctr: graphdriver.NewRefCounter(graphdriver.NewFsChecker(fileSystemType)),
supportsDType: supportsDType,
usingMetacopy: usingMetacopy,
locker: locker.New(),
@@ -1016,8 +1021,39 @@ func (d *Driver) Put(id string) error {
if _, err := ioutil.ReadFile(path.Join(dir, lowerFile)); err != nil && !os.IsNotExist(err) {
return err
}
- if err := unix.Unmount(mountpoint, unix.MNT_DETACH); err != nil && !os.IsNotExist(err) {
- logrus.Debugf("Failed to unmount %s overlay: %s - %v", id, mountpoint, err)
+
+ unmounted := false
+
+ if d.options.mountProgram != "" {
+ // Attempt to unmount the FUSE mount using either fusermount or fusermount3.
+ // If they fail, fallback to unix.Unmount
+ for _, v := range []string{"fusermount3", "fusermount"} {
+ err := exec.Command(v, "-u", mountpoint).Run()
+ if err != nil && !os.IsNotExist(err) {
+ logrus.Debugf("Error unmounting %s with %s - %v", mountpoint, v, err)
+ }
+ if err == nil {
+ unmounted = true
+ break
+ }
+ }
+ // If fusermount|fusermount3 failed to unmount the FUSE file system, make sure all
+ // pending changes are propagated to the file system
+ if !unmounted {
+ fd, err := unix.Open(mountpoint, unix.O_DIRECTORY, 0)
+ if err == nil {
+ if err := unix.Syncfs(fd); err != nil {
+ logrus.Debugf("Error Syncfs(%s) - %v", mountpoint, err)
+ }
+ unix.Close(fd)
+ }
+ }
+ }
+
+ if !unmounted {
+ if err := unix.Unmount(mountpoint, unix.MNT_DETACH); err != nil && !os.IsNotExist(err) {
+ logrus.Debugf("Failed to unmount %s overlay: %s - %v", id, mountpoint, err)
+ }
}
if err := unix.Rmdir(mountpoint); err != nil && !os.IsNotExist(err) {
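
The overlay.go hunk above changes Put() so that mount-program (FUSE) mounts are first unmounted with fusermount3, then fusermount; only if both fail does it sync pending changes and fall back to a lazy kernel unmount. A simplified sketch of that fallback chain (error handling is reduced relative to the vendored driver, which also tolerates missing binaries via os.IsNotExist):

package main

import (
	"log"
	"os/exec"

	"golang.org/x/sys/unix"
)

func unmountFuse(mountpoint string) {
	for _, tool := range []string{"fusermount3", "fusermount"} {
		if err := exec.Command(tool, "-u", mountpoint).Run(); err == nil {
			return // cleanly unmounted
		}
	}
	// fusermount failed (or is missing): flush pending writes first...
	if fd, err := unix.Open(mountpoint, unix.O_DIRECTORY, 0); err == nil {
		if err := unix.Syncfs(fd); err != nil {
			log.Printf("Syncfs(%s): %v", mountpoint, err)
		}
		unix.Close(fd)
	}
	// ...then detach lazily so the mount goes away once no longer busy.
	if err := unix.Unmount(mountpoint, unix.MNT_DETACH); err != nil {
		log.Printf("Unmount(%s): %v", mountpoint, err)
	}
}

func main() {
	unmountFuse("/var/lib/containers/storage/overlay/example/merged")
}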
diff --git a/vendor/github.com/containers/storage/drivers/vfs/copy_linux.go b/vendor/github.com/containers/storage/drivers/vfs/copy_linux.go
index 8137fcf67..bf22a5f6f 100644
--- a/vendor/github.com/containers/storage/drivers/vfs/copy_linux.go
+++ b/vendor/github.com/containers/storage/drivers/vfs/copy_linux.go
@@ -3,5 +3,5 @@ package vfs
import "github.com/containers/storage/drivers/copy"
func dirCopy(srcDir, dstDir string) error {
- return copy.DirCopy(srcDir, dstDir, copy.Content, false)
+ return copy.DirCopy(srcDir, dstDir, copy.Content, true)
}
diff --git a/vendor/github.com/containers/storage/go.mod b/vendor/github.com/containers/storage/go.mod
index 934e82ad2..378b427de 100644
--- a/vendor/github.com/containers/storage/go.mod
+++ b/vendor/github.com/containers/storage/go.mod
@@ -3,26 +3,29 @@ module github.com/containers/storage
require (
github.com/BurntSushi/toml v0.3.1
github.com/DataDog/zstd v1.4.0 // indirect
- github.com/Microsoft/go-winio v0.4.12
- github.com/Microsoft/hcsshim v0.8.6
- github.com/docker/docker v0.0.0-20171019062838-86f080cff091
+ github.com/Microsoft/go-winio v0.4.15-0.20190919025122-fc70bd9a86b5
+ github.com/Microsoft/hcsshim v0.8.7
+ github.com/docker/docker v0.0.0-20171019062838-86f080cff091 // indirect
github.com/docker/go-units v0.4.0
- github.com/klauspost/compress v1.7.2
+ github.com/klauspost/compress v1.9.4
github.com/klauspost/cpuid v1.2.1 // indirect
github.com/klauspost/pgzip v1.2.1
- github.com/mattn/go-shellwords v1.0.5
+ github.com/mattn/go-shellwords v1.0.6
github.com/mistifyio/go-zfs v2.1.1+incompatible
github.com/opencontainers/go-digest v1.0.0-rc1
- github.com/opencontainers/runc v1.0.0-rc8
- github.com/opencontainers/selinux v1.2.2
+ github.com/opencontainers/runc v1.0.0-rc9
+ github.com/opencontainers/selinux v1.3.0
github.com/pkg/errors v0.8.1
github.com/pquerna/ffjson v0.0.0-20181028064349-e517b90714f7
github.com/sirupsen/logrus v1.4.2
- github.com/stretchr/testify v1.3.0
+ github.com/spf13/pflag v1.0.3 // indirect
+ github.com/stretchr/testify v1.4.0
github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2
github.com/tchap/go-patricia v2.3.0+incompatible
github.com/vbatts/tar-split v0.11.1
golang.org/x/net v0.0.0-20190628185345-da137c7871d7
- golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb
- gotest.tools v0.0.0-20190624233834-05ebafbffc79
+ golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3
+ gotest.tools v2.2.0+incompatible
)
+
+go 1.13
diff --git a/vendor/github.com/containers/storage/go.sum b/vendor/github.com/containers/storage/go.sum
index a0e05dd1d..f31828d2a 100644
--- a/vendor/github.com/containers/storage/go.sum
+++ b/vendor/github.com/containers/storage/go.sum
@@ -1,11 +1,40 @@
+cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/DataDog/zstd v1.4.0 h1:vhoV+DUHnRZdKW1i5UMjAk2G4JY8wN4ayRfYDNdEhwo=
github.com/DataDog/zstd v1.4.0/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo=
github.com/Microsoft/go-winio v0.4.12 h1:xAfWHN1IrQ0NJ9TBC0KBZoqLjzDTr1ML+4MywiUOryc=
github.com/Microsoft/go-winio v0.4.12/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA=
+github.com/Microsoft/go-winio v0.4.14 h1:+hMXMk01us9KgxGb7ftKQt2Xpf5hH/yky+TDA+qxleU=
+github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA=
+github.com/Microsoft/go-winio v0.4.15-0.20190919025122-fc70bd9a86b5 h1:ygIc8M6trr62pF5DucadTWGdEB4mEyvzi0e2nbcmcyA=
+github.com/Microsoft/go-winio v0.4.15-0.20190919025122-fc70bd9a86b5/go.mod h1:tTuCMEN+UleMWgg9dVx4Hu52b1bJo+59jBh3ajtinzw=
github.com/Microsoft/hcsshim v0.8.6 h1:ZfF0+zZeYdzMIVMZHKtDKJvLHj76XCuVae/jNkjj0IA=
github.com/Microsoft/hcsshim v0.8.6/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg=
+github.com/Microsoft/hcsshim v0.8.7 h1:ptnOoufxGSzauVTsdE+wMYnCWA301PdoN4xg5oRdZpg=
+github.com/Microsoft/hcsshim v0.8.7/go.mod h1:OHd7sQqRFrYd3RmSgbgji+ctCwkbq2wbEYNSzOYtcBQ=
+github.com/blang/semver v3.1.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
+github.com/checkpoint-restore/go-criu v0.0.0-20190109184317-bdb7599cd87b h1:T4nWG1TXIxeor8mAu5bFguPJgSIGhZqv/f0z55KCrJM=
+github.com/checkpoint-restore/go-criu v0.0.0-20190109184317-bdb7599cd87b/go.mod h1:TrMrLQfeENAPYPRsJuq3jsqdlRh3lvi6trTZJG8+tho=
+github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
+github.com/containerd/cgroups v0.0.0-20190919134610-bf292b21730f h1:tSNMc+rJDfmYntojat8lljbt1mgKNpTxUZJsSzJ9Y1s=
+github.com/containerd/cgroups v0.0.0-20190919134610-bf292b21730f/go.mod h1:OApqhQ4XNSNC13gXIwDjhOQxjWa/NxkwZXJ1EvqT0ko=
+github.com/containerd/console v0.0.0-20180822173158-c12b1e7919c1/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw=
+github.com/containerd/console v0.0.0-20181022165439-0650fd9eeb50 h1:WMpHmC6AxwWb9hMqhudkqG7A/p14KiMnl6d3r1iUMjU=
+github.com/containerd/console v0.0.0-20181022165439-0650fd9eeb50/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw=
+github.com/containerd/containerd v1.3.0-beta.2.0.20190828155532-0293cbd26c69/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
+github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
+github.com/containerd/fifo v0.0.0-20190226154929-a9fb20d87448/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI=
+github.com/containerd/go-runc v0.0.0-20180907222934-5a6d9f37cfa3/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0=
+github.com/containerd/ttrpc v0.0.0-20190828154514-0e0f228740de/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o=
+github.com/containerd/typeurl v0.0.0-20180627222232-a93fcdb778cd/go.mod h1:Cm3kwCdlkCfMSHURc+r6fwoGH6/F1hH3S4sg0rLFWPc=
+github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
+github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f h1:JOrtw2xFKzlg+cbHpyrpLDmnN1HqhBfnX7WDiW7eG2c=
+github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
+github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d h1:U+s90UTSYgptZMwQh2aRr3LuazLJIa+Pg3Kc1ylSYVY=
+github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
+github.com/cyphar/filepath-securejoin v0.2.2 h1:jCwT2GTP+PY5nBz3c/YL5PAIbusElVrPujOBSCj8xRg=
+github.com/cyphar/filepath-securejoin v0.2.2/go.mod h1:FpkQEhXnPnOthhzymB7CGsFk2G9VLXONKD9G7QGMM+4=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
@@ -13,10 +42,37 @@ github.com/docker/docker v0.0.0-20171019062838-86f080cff091 h1:QpxpTw4MJeOzbC7X0
github.com/docker/docker v0.0.0-20171019062838-86f080cff091/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw=
github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
+github.com/godbus/dbus v0.0.0-20190422162347-ade71ed3457e/go.mod h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4=
+github.com/godbus/dbus v4.1.0+incompatible h1:WqqLRTsQic3apZUK9qC5sGNfXthmPXzUZ7nQPrNITa4=
+github.com/godbus/dbus v4.1.0+incompatible/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw=
+github.com/gogo/protobuf v1.2.1 h1:/s5zKNz0uPFCZ5hddgPdo2TK2TVrUNMn0OOX8/aZMTE=
+github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
+github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
+github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
+github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs=
+github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/google/go-cmp v0.2.0 h1:+dTQ8DZQJz0Mb/HjFlkptS1FeQ4cWSnN941F8aEG4SQ=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
+github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY=
+github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
+github.com/hashicorp/errwrap v0.0.0-20141028054710-7554cd9344ce/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
+github.com/hashicorp/go-multierror v0.0.0-20161216184304-ed905158d874/go.mod h1:JMRHfdO9jKNzS/+BTlxCjKNQHg/jZAft8U7LloJvN7I=
+github.com/hashicorp/golang-lru v0.5.1 h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+dAcgU=
+github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
+github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
+github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/klauspost/compress v1.7.2 h1:liMOoeIvFpr9kEvalrZ7VVBA4wGf7zfOgwBjzz/5g2Y=
github.com/klauspost/compress v1.7.2/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
+github.com/klauspost/compress v1.9.1 h1:TWy0o9J9c6LK9C8t7Msh6IAJNXbsU/nvKLTQUU5HdaY=
+github.com/klauspost/compress v1.9.1/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
+github.com/klauspost/compress v1.9.2 h1:LfVyl+ZlLlLDeQ/d2AqfGIIH4qEDu0Ed2S5GyhCWIWY=
+github.com/klauspost/compress v1.9.2/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
+github.com/klauspost/compress v1.9.3 h1:hkFELABwacUEgBfiguNeQydKv3M9pawBq8o24Ypw9+M=
+github.com/klauspost/compress v1.9.3/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
+github.com/klauspost/compress v1.9.4 h1:xhvAeUPQ2drNUhKtrGdTGNvV9nNafHMUkRyLkzxJoB4=
+github.com/klauspost/compress v1.9.4/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
github.com/klauspost/cpuid v1.2.1 h1:vJi+O/nMdFt0vqm8NZBI6wzALWdA2X+egi0ogNyrC/w=
github.com/klauspost/cpuid v1.2.1/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
github.com/klauspost/pgzip v1.2.1 h1:oIPZROsWuPHpOdMVWLuJZXwgjhrW8r1yEX8UqMyeNHM=
@@ -25,16 +81,30 @@ github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGi
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/mattn/go-shellwords v1.0.5 h1:JhhFTIOslh5ZsPrpa3Wdg8bF0WI3b44EMblmU9wIsXc=
github.com/mattn/go-shellwords v1.0.5/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o=
+github.com/mattn/go-shellwords v1.0.6 h1:9Jok5pILi5S1MnDirGVTufYGtksUs/V2BWUP3ZkeUUI=
+github.com/mattn/go-shellwords v1.0.6/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o=
github.com/mistifyio/go-zfs v2.1.1+incompatible h1:gAMO1HM9xBRONLHHYnu5iFsOJUiJdNZo6oqSENd4eW8=
github.com/mistifyio/go-zfs v2.1.1+incompatible/go.mod h1:8AuVvqP/mXw1px98n46wfvcGfQ4ci2FwoAjKYxuo3Z4=
+github.com/mrunalp/fileutils v0.0.0-20171103030105-7d4729fb3618 h1:7InQ7/zrOh6SlFjaXFubv0xX0HsuC9qJsdqm7bNQpYM=
+github.com/mrunalp/fileutils v0.0.0-20171103030105-7d4729fb3618/go.mod h1:x8F1gnqOkIEiO4rqoeEEEqQbo7HjGMTvyoq3gej4iT0=
+github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
github.com/opencontainers/go-digest v1.0.0-rc1 h1:WzifXhOVOEOuFYOJAW6aQqW0TooG2iki3E3Ii+WN7gQ=
github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
+github.com/opencontainers/runc v0.0.0-20190115041553-12f6a991201f/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
github.com/opencontainers/runc v0.1.1 h1:GlxAyO6x8rfZYN9Tt0Kti5a/cP41iuiO2yYT0IJGY8Y=
github.com/opencontainers/runc v0.1.1/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
github.com/opencontainers/runc v1.0.0-rc8 h1:dDCFes8Hj1r/i5qnypONo5jdOme/8HWZC/aNDyhECt0=
github.com/opencontainers/runc v1.0.0-rc8/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
+github.com/opencontainers/runc v1.0.0-rc9 h1:/k06BMULKF5hidyoZymkoDCzdJzltZpz/UU4LguQVtc=
+github.com/opencontainers/runc v1.0.0-rc9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
+github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
+github.com/opencontainers/runtime-spec v1.0.1 h1:wY4pOY8fBdSIvs9+IDHC55thBuEulhzfSgKeC1yFvzQ=
+github.com/opencontainers/runtime-spec v1.0.1/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
+github.com/opencontainers/runtime-tools v0.0.0-20181011054405-1d69bd0f9c39/go.mod h1:r3f7wjNzSs2extwzU3Y+6pKfobzPh+kKFJ3ofN+3nfs=
github.com/opencontainers/selinux v1.2.2 h1:Kx9J6eDG5/24A6DtUquGSpJQ+m2MUTahn4FtGEe8bFg=
github.com/opencontainers/selinux v1.2.2/go.mod h1:+BLncwf63G4dgOzykXAxcmnFlUaOlkDdmw/CqsW6pjs=
+github.com/opencontainers/selinux v1.3.0 h1:xsI95WzPZu5exzA6JzkLSfdr/DilzOhCJOqGe5TgR0g=
+github.com/opencontainers/selinux v1.3.0/go.mod h1:+BLncwf63G4dgOzykXAxcmnFlUaOlkDdmw/CqsW6pjs=
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
@@ -42,28 +112,100 @@ github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZb
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/pquerna/ffjson v0.0.0-20181028064349-e517b90714f7 h1:gGBSHPOU7g8YjTbhwn+lvFm2VDEhhA+PwDIlstkgSxE=
github.com/pquerna/ffjson v0.0.0-20181028064349-e517b90714f7/go.mod h1:YARuvh7BUWHNhzDq2OM5tzR2RiCcN2D7sapiKyCel/M=
+github.com/prometheus/procfs v0.0.5/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ=
+github.com/russross/blackfriday/v2 v2.0.1 h1:lPqVAte+HuHNfhJ/0LC98ESWRz8afy9tM/0RK8m9o+Q=
+github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
+github.com/seccomp/libseccomp-golang v0.9.1 h1:NJjM5DNFOs0s3kYE1WUOr6G8V97sdt46rlXTMfXGWBo=
+github.com/seccomp/libseccomp-golang v0.9.1/go.mod h1:GbW5+tmTXfcxTToHLXlScSlAvWlF4P2Ca7zGrPiEpWo=
+github.com/shurcooL/sanitized_anchor_name v1.0.0 h1:PdmoCO6wvbs+7yrJyMORt4/BmY5IYyJwS/kOiWx8mHo=
+github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
+github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q=
github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4=
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/objx v0.1.1 h1:2vfRuCMp5sSVIDSqO8oNnWJq7mPa6KVP3iPIwFBuy8A=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
+github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk=
+github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
+github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2 h1:b6uOv7YOFK0TYG7HtkIgExQo+2RdLuwRft63jn2HWj8=
github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
github.com/tchap/go-patricia v2.3.0+incompatible h1:GkY4dP3cEfEASBPPkWd+AmjYxhmDkqO9/zg7R0lSQRs=
github.com/tchap/go-patricia v2.3.0+incompatible/go.mod h1:bmLyhP68RS6kStMGxByiQ23RP/odRBOTVjwp2cDyi6I=
+github.com/urfave/cli v0.0.0-20171014202726-7bc6a0acffa5/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
+github.com/urfave/cli v1.22.1 h1:+mkCCcOFKPnCmVYVcURKps1Xe+3zP90gSYGNfRkjoIY=
+github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
github.com/vbatts/tar-split v0.11.1 h1:0Odu65rhcZ3JZaPHxl7tCI3V/C/Q9Zf82UFravl02dE=
github.com/vbatts/tar-split v0.11.1/go.mod h1:LEuURwDEiWjRjwu46yU3KVGuUdVv/dcnpcEPSzR8z6g=
+github.com/vishvananda/netlink v1.0.0 h1:bqNY2lgheFIu1meHUFSH3d7vG93AFyqg3oGbJCOJgSM=
+github.com/vishvananda/netlink v1.0.0/go.mod h1:+SR5DhBJrl6ZM7CoCKvpw5BKroDKQ+PJqOg65H/2ktk=
+github.com/vishvananda/netns v0.0.0-20190625233234-7109fa855b0f h1:nBX3nTcmxEtHSERBJaIo1Qa26VwRaopnZmfDQUXsF4I=
+github.com/vishvananda/netns v0.0.0-20190625233234-7109fa855b0f/go.mod h1:ZjcWmFBXmLKZu9Nxj3WKYEafiSqer2rnvPr0en9UNpI=
+github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
+github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ=
+github.com/xeipuuv/gojsonschema v0.0.0-20180618132009-1d523034197f/go.mod h1:5yf86TLmAcydyeJq5YvxkGPE2fm/u4myDekKRoLuqhs=
+go.opencensus.io v0.22.0 h1:C9hSCOW830chIVkdja34wa6Ky+IzWllkUinR+BtRZd4=
+go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550 h1:ObdrDkeb4kJdCP557AjRjq69pTHfNouLtWZG7j9rPN8=
+golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
+golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
+golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
+golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190628185345-da137c7871d7 h1:rTIdg5QFRR7XCaK4LCjBiPbx8j4DQRpdYMnGn/bJUEU=
golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
+golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190514135907-3a4b5fb9f71f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb h1:fgwFCsaw9buMuxNd6+DQfAuSFqbNiQZpcgJQAgJsK6k=
golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3 h1:7TYNF4UdlohbFwpNH04CoPMp1cHUZgO1Ebq5r2hIjfo=
+golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191025090151-53bf42e6b339 h1:zSqWKgm/o7HAnlAzBQ+aetp9fpuyytsXnKA8eiLHYQM=
+golang.org/x/sys v0.0.0-20191025090151-53bf42e6b339/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191127021746-63cb32ae39b2 h1:/J2nHFg1MTqaRLFO7M+J78ASNsJoz3r0cvHBPQ77fsE=
+golang.org/x/sys v0.0.0-20191127021746-63cb32ae39b2/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
+golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20180810170437-e96c4e24768d/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
+golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
+google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
+google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
+google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
+google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
+google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw=
+gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gotest.tools v0.0.0-20190624233834-05ebafbffc79 h1:C+K4iPg1rIvmCf4JjelkbWv2jeWevEwp05Lz8XfTYgE=
gotest.tools v0.0.0-20190624233834-05ebafbffc79/go.mod h1:R//lfYlUuTOTfblYI3lGoAAAebUdzjvbmQsuB7Ykd90=
+gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo=
+gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw=
+honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+k8s.io/kubernetes v1.13.0/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk=
diff --git a/vendor/github.com/containers/storage/images.go b/vendor/github.com/containers/storage/images.go
index 5d6a2e48d..6373ebb41 100644
--- a/vendor/github.com/containers/storage/images.go
+++ b/vendor/github.com/containers/storage/images.go
@@ -47,6 +47,11 @@ type Image struct {
// or canonical references.
Names []string `json:"names,omitempty"`
+	// NamesHistory is an optional set of Names the image had in the past. It
+	// contains no duplicates, and the newest entry comes first.
+ NamesHistory []string `json:"names-history,omitempty"`
+
// TopLayer is the ID of the topmost layer of the image itself, if the
// image contains one or more layers. Multiple images can refer to the
// same top layer.
@@ -155,6 +160,7 @@ func copyImage(i *Image) *Image {
Digest: i.Digest,
Digests: copyDigestSlice(i.Digests),
Names: copyStringSlice(i.Names),
+ NamesHistory: copyStringSlice(i.NamesHistory),
TopLayer: i.TopLayer,
MappedTopLayers: copyStringSlice(i.MappedTopLayers),
Metadata: i.Metadata,
@@ -481,6 +487,10 @@ func (r *imageStore) removeName(image *Image, name string) {
image.Names = stringSliceWithoutValue(image.Names, name)
}
+func (i *Image) addNameToHistory(name string) {
+ i.NamesHistory = dedupeNames(append([]string{name}, i.NamesHistory...))
+}
+
func (r *imageStore) SetNames(id string, names []string) error {
if !r.IsReadWrite() {
return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to change image name assignments at %q", r.imagespath())
@@ -495,6 +505,7 @@ func (r *imageStore) SetNames(id string, names []string) error {
r.removeName(otherImage, name)
}
r.byname[name] = image
+ image.addNameToHistory(name)
}
image.Names = names
return r.Save()
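
The NamesHistory bookkeeping above prepends the most recent name and then deduplicates, so re-tagging keeps one entry per name with the newest first. A minimal sketch of that behavior; the dedupe helper below is a stand-in for the package-internal dedupeNames, whose implementation is not shown in this patch:

package main

import "fmt"

// dedupe stands in for storage's internal dedupeNames: keep the first
// occurrence of each name and drop later duplicates.
func dedupe(names []string) []string {
	seen := make(map[string]bool)
	var out []string
	for _, n := range names {
		if !seen[n] {
			seen[n] = true
			out = append(out, n)
		}
	}
	return out
}

func main() {
	history := []string{"docker.io/library/alpine:3.9"}
	// addNameToHistory prepends the new name before deduplicating,
	// so the newest entry ends up first.
	history = dedupe(append([]string{"docker.io/library/alpine:3.10"}, history...))
	fmt.Println(history) // [docker.io/library/alpine:3.10 docker.io/library/alpine:3.9]
}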
diff --git a/vendor/github.com/containers/storage/images_ffjson.go b/vendor/github.com/containers/storage/images_ffjson.go
index 539acfe93..0dde97c18 100644
--- a/vendor/github.com/containers/storage/images_ffjson.go
+++ b/vendor/github.com/containers/storage/images_ffjson.go
@@ -59,6 +59,22 @@ func (j *Image) MarshalJSONBuf(buf fflib.EncodingBuffer) error {
}
buf.WriteByte(',')
}
+ if len(j.NamesHistory) != 0 {
+ buf.WriteString(`"names-history":`)
+ if j.NamesHistory != nil {
+ buf.WriteString(`[`)
+ for i, v := range j.NamesHistory {
+ if i != 0 {
+ buf.WriteString(`,`)
+ }
+ fflib.WriteJsonString(buf, string(v))
+ }
+ buf.WriteString(`]`)
+ } else {
+ buf.WriteString(`null`)
+ }
+ buf.WriteByte(',')
+ }
if len(j.TopLayer) != 0 {
buf.WriteString(`"layer":`)
fflib.WriteJsonString(buf, string(j.TopLayer))
@@ -171,6 +187,8 @@ const (
ffjtImageNames
+ ffjtImageNamesHistory
+
ffjtImageTopLayer
ffjtImageMappedTopLayers
@@ -194,6 +212,8 @@ var ffjKeyImageDigest = []byte("digest")
var ffjKeyImageNames = []byte("names")
+var ffjKeyImageNamesHistory = []byte("names-history")
+
var ffjKeyImageTopLayer = []byte("layer")
var ffjKeyImageMappedTopLayers = []byte("mapped-layers")
@@ -348,6 +368,11 @@ mainparse:
currentKey = ffjtImageNames
state = fflib.FFParse_want_colon
goto mainparse
+
+ } else if bytes.Equal(ffjKeyImageNamesHistory, kn) {
+ currentKey = ffjtImageNamesHistory
+ state = fflib.FFParse_want_colon
+ goto mainparse
}
}
@@ -400,6 +425,12 @@ mainparse:
goto mainparse
}
+ if fflib.EqualFoldRight(ffjKeyImageNamesHistory, kn) {
+ currentKey = ffjtImageNamesHistory
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
if fflib.EqualFoldRight(ffjKeyImageNames, kn) {
currentKey = ffjtImageNames
state = fflib.FFParse_want_colon
@@ -444,6 +475,9 @@ mainparse:
case ffjtImageNames:
goto handle_Names
+ case ffjtImageNamesHistory:
+ goto handle_NamesHistory
+
case ffjtImageTopLayer:
goto handle_TopLayer
@@ -608,6 +642,80 @@ handle_Names:
state = fflib.FFParse_after_value
goto mainparse
+handle_NamesHistory:
+
+ /* handler: j.NamesHistory type=[]string kind=slice quoted=false*/
+
+ {
+
+ {
+ if tok != fflib.FFTok_left_brace && tok != fflib.FFTok_null {
+ return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok))
+ }
+ }
+
+ if tok == fflib.FFTok_null {
+ j.NamesHistory = nil
+ } else {
+
+ j.NamesHistory = []string{}
+
+ wantVal := true
+
+ for {
+
+ var tmpJNamesHistory string
+
+ tok = fs.Scan()
+ if tok == fflib.FFTok_error {
+ goto tokerror
+ }
+ if tok == fflib.FFTok_right_brace {
+ break
+ }
+
+ if tok == fflib.FFTok_comma {
+ if wantVal == true {
+ // TODO(pquerna): this isn't an ideal error message, this handles
+ // things like [,,,] as an array value.
+ return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok))
+ }
+ continue
+ } else {
+ wantVal = true
+ }
+
+ /* handler: tmpJNamesHistory type=string kind=string quoted=false*/
+
+ {
+
+ {
+ if tok != fflib.FFTok_string && tok != fflib.FFTok_null {
+ return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok))
+ }
+ }
+
+ if tok == fflib.FFTok_null {
+
+ } else {
+
+ outBuf := fs.Output.Bytes()
+
+ tmpJNamesHistory = string(string(outBuf))
+
+ }
+ }
+
+ j.NamesHistory = append(j.NamesHistory, tmpJNamesHistory)
+
+ wantVal = false
+ }
+ }
+ }
+
+ state = fflib.FFParse_after_value
+ goto mainparse
+
handle_TopLayer:
/* handler: j.TopLayer type=string kind=string quoted=false*/
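
The generated marshaller above serializes the new field under the "names-history" key and omits it when empty. A hedged sketch of the resulting JSON shape, using encoding/json on a local struct that mirrors only the two name fields (not the real Image type):

package main

import (
	"encoding/json"
	"fmt"
)

// img mirrors only the name-related fields of storage's Image type.
type img struct {
	Names        []string `json:"names,omitempty"`
	NamesHistory []string `json:"names-history,omitempty"`
}

func main() {
	b, err := json.Marshal(img{
		Names:        []string{"docker.io/library/alpine:3.10"},
		NamesHistory: []string{"docker.io/library/alpine:3.10", "docker.io/library/alpine:3.9"},
	})
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b))
	// {"names":["docker.io/library/alpine:3.10"],"names-history":["docker.io/library/alpine:3.10","docker.io/library/alpine:3.9"]}
}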
diff --git a/vendor/github.com/containers/storage/pkg/archive/archive.go b/vendor/github.com/containers/storage/pkg/archive/archive.go
index 20f017e64..6e2618d1e 100644
--- a/vendor/github.com/containers/storage/pkg/archive/archive.go
+++ b/vendor/github.com/containers/storage/pkg/archive/archive.go
@@ -387,7 +387,10 @@ func fillGo18FileTypeBits(mode int64, fi os.FileInfo) int64 {
// ReadSecurityXattrToTarHeader reads security.capability xattr from filesystem
// to a tar header
func ReadSecurityXattrToTarHeader(path string, hdr *tar.Header) error {
- capability, _ := system.Lgetxattr(path, "security.capability")
+ capability, err := system.Lgetxattr(path, "security.capability")
+ if err != nil && err != system.EOPNOTSUPP {
+ return err
+ }
if capability != nil {
hdr.Xattrs = make(map[string]string)
hdr.Xattrs["security.capability"] = string(capability)
@@ -395,6 +398,31 @@ func ReadSecurityXattrToTarHeader(path string, hdr *tar.Header) error {
return nil
}
+// ReadUserXattrToTarHeader reads user.* xattr from filesystem to a tar header
+func ReadUserXattrToTarHeader(path string, hdr *tar.Header) error {
+ xattrs, err := system.Llistxattr(path)
+ if err != nil && err != system.EOPNOTSUPP {
+ return err
+ }
+ for _, key := range xattrs {
+ if strings.HasPrefix(key, "user.") {
+ value, err := system.Lgetxattr(path, key)
+ if err == system.E2BIG {
+ logrus.Errorf("archive: Skipping xattr for file %s since value is too big: %s", path, key)
+ continue
+ }
+ if err != nil {
+ return err
+ }
+ if hdr.Xattrs == nil {
+ hdr.Xattrs = make(map[string]string)
+ }
+ hdr.Xattrs[key] = string(value)
+ }
+ }
+ return nil
+}
+
type tarWhiteoutConverter interface {
ConvertWrite(*tar.Header, string, os.FileInfo) (*tar.Header, error)
ConvertRead(*tar.Header, string) (bool, error)
@@ -469,6 +497,9 @@ func (ta *tarAppender) addTarFile(path, name string) error {
if err := ReadSecurityXattrToTarHeader(path, hdr); err != nil {
return err
}
+ if err := ReadUserXattrToTarHeader(path, hdr); err != nil {
+ return err
+ }
if ta.CopyPass {
copyPassHeader(hdr)
}
@@ -540,10 +571,7 @@ func (ta *tarAppender) addTarFile(path, name string) error {
}
if hdr.Typeflag == tar.TypeReg && hdr.Size > 0 {
- // We use system.OpenSequential to ensure we use sequential file
- // access on Windows to avoid depleting the standby list.
- // On Linux, this equates to a regular os.Open.
- file, err := system.OpenSequential(path)
+ file, err := os.Open(path)
if err != nil {
return err
}
@@ -584,7 +612,7 @@ func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, L
// Source is regular file. We use system.OpenFileSequential to use sequential
// file access to avoid depleting the standby list on Windows.
// On Linux, this equates to a regular os.OpenFile
- file, err := system.OpenFileSequential(path, os.O_CREATE|os.O_WRONLY, hdrInfo.Mode())
+ file, err := os.OpenFile(path, os.O_CREATE|os.O_WRONLY, hdrInfo.Mode())
if err != nil {
return err
}
@@ -821,11 +849,12 @@ func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error)
// is asking for that file no matter what - which is true
// for some files, like .dockerignore and Dockerfile (sometimes)
if include != relFilePath {
- skip, err = pm.Matches(relFilePath)
+ matches, err := pm.IsMatch(relFilePath)
if err != nil {
logrus.Errorf("Error matching %s: %v", relFilePath, err)
return err
}
+ skip = matches
}
if skip {
@@ -1164,7 +1193,7 @@ func (archiver *Archiver) CopyFileWithTar(src, dst string) (err error) {
dst = filepath.Join(dst, filepath.Base(src))
}
// Create the holding directory if necessary
- if err := system.MkdirAll(filepath.Dir(dst), 0700, ""); err != nil {
+ if err := os.MkdirAll(filepath.Dir(dst), 0700); err != nil {
return err
}
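
The ReadUserXattrToTarHeader added above copies only user.* extended attributes into the tar header, skipping oversized values and tolerating filesystems without xattr support. A minimal sketch of the filtering step; the map stands in for what Llistxattr/Lgetxattr would return, and the user.checksum key is purely illustrative:

package main

import (
	"archive/tar"
	"fmt"
	"strings"
)

func main() {
	// Stand-in for the xattrs Llistxattr/Lgetxattr would report for a file.
	onDisk := map[string]string{
		"user.checksum":       "sha256:deadbeef", // hypothetical user xattr
		"security.capability": "...",             // handled separately, not copied here
	}

	hdr := &tar.Header{Name: "layer/file"}
	for key, value := range onDisk {
		if strings.HasPrefix(key, "user.") {
			if hdr.Xattrs == nil {
				hdr.Xattrs = make(map[string]string)
			}
			hdr.Xattrs[key] = value
		}
	}
	fmt.Println(hdr.Xattrs) // map[user.checksum:sha256:deadbeef]
}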
diff --git a/vendor/github.com/containers/storage/pkg/archive/archive_linux.go b/vendor/github.com/containers/storage/pkg/archive/archive_linux.go
index 5602c7e21..3a47eceae 100644
--- a/vendor/github.com/containers/storage/pkg/archive/archive_linux.go
+++ b/vendor/github.com/containers/storage/pkg/archive/archive_linux.go
@@ -61,10 +61,7 @@ func (o overlayWhiteoutConverter) ConvertWrite(hdr *tar.Header, path string, fi
}
if statErr == nil {
if stat.Mode()&os.ModeCharDevice != 0 {
- // It's a whiteout for this directory, so it can't have been
- // both deleted and recreated in the layer we're diffing.
- s := stat.Sys().(*syscall.Stat_t)
- if major(s.Rdev) == 0 && minor(s.Rdev) == 0 {
+ if isWhiteOut(stat) {
return nil, nil
}
}
@@ -98,8 +95,7 @@ func (o overlayWhiteoutConverter) ConvertWrite(hdr *tar.Header, path string, fi
// If it's whiteout for a parent directory, then the
// original directory wasn't inherited into this layer,
// so we don't need to emit whiteout for it.
- s := stat.Sys().(*syscall.Stat_t)
- if major(s.Rdev) == 0 && minor(s.Rdev) == 0 {
+ if isWhiteOut(stat) {
return nil, nil
}
}
@@ -141,3 +137,8 @@ func (overlayWhiteoutConverter) ConvertRead(hdr *tar.Header, path string) (bool,
return true, nil
}
+
+func isWhiteOut(stat os.FileInfo) bool {
+ s := stat.Sys().(*syscall.Stat_t)
+ return major(uint64(s.Rdev)) == 0 && minor(uint64(s.Rdev)) == 0
+}
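
The new isWhiteOut helper centralizes the overlayfs convention that a whiteout is a character device with device number 0:0. A standalone sketch of the same check; the path is an assumption for illustration, and unix.Major/unix.Minor stand in for the package-local major/minor helpers:

package main

import (
	"fmt"
	"os"
	"syscall"

	"golang.org/x/sys/unix"
)

// isWhiteout reports whether fi describes an overlayfs whiteout:
// a character device whose device number is 0:0.
func isWhiteout(fi os.FileInfo) bool {
	if fi.Mode()&os.ModeCharDevice == 0 {
		return false
	}
	s, ok := fi.Sys().(*syscall.Stat_t)
	if !ok {
		return false
	}
	return unix.Major(uint64(s.Rdev)) == 0 && unix.Minor(uint64(s.Rdev)) == 0
}

func main() {
	// Hypothetical path into an overlay upper layer.
	if fi, err := os.Lstat("/var/lib/containers/storage/overlay/LAYER/diff/etc/removed"); err == nil {
		fmt.Println("whiteout:", isWhiteout(fi))
	}
}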
diff --git a/vendor/github.com/containers/storage/pkg/archive/changes.go b/vendor/github.com/containers/storage/pkg/archive/changes.go
index d3d6c8f74..3ce396070 100644
--- a/vendor/github.com/containers/storage/pkg/archive/changes.go
+++ b/vendor/github.com/containers/storage/pkg/archive/changes.go
@@ -8,6 +8,7 @@ import (
"io/ioutil"
"os"
"path/filepath"
+ "reflect"
"sort"
"strings"
"syscall"
@@ -263,6 +264,7 @@ type FileInfo struct {
children map[string]*FileInfo
capability []byte
added bool
+ xattrs map[string]string
}
// LookUp looks up the file information of a file.
@@ -331,7 +333,8 @@ func (info *FileInfo) addChanges(oldInfo *FileInfo, changes *[]Change) {
// breaks down is if some code intentionally hides a change by setting
// back mtime
if statDifferent(oldStat, oldInfo, newStat, info) ||
- !bytes.Equal(oldChild.capability, newChild.capability) {
+ !bytes.Equal(oldChild.capability, newChild.capability) ||
+ !reflect.DeepEqual(oldChild.xattrs, newChild.xattrs) {
change := Change{
Path: newChild.path(),
Kind: ChangeModify,
diff --git a/vendor/github.com/containers/storage/pkg/archive/changes_linux.go b/vendor/github.com/containers/storage/pkg/archive/changes_linux.go
index b12309361..ecfa45d73 100644
--- a/vendor/github.com/containers/storage/pkg/archive/changes_linux.go
+++ b/vendor/github.com/containers/storage/pkg/archive/changes_linux.go
@@ -6,11 +6,13 @@ import (
"os"
"path/filepath"
"sort"
+ "strings"
"syscall"
"unsafe"
"github.com/containers/storage/pkg/idtools"
"github.com/containers/storage/pkg/system"
+ "github.com/sirupsen/logrus"
"golang.org/x/sys/unix"
)
@@ -83,7 +85,30 @@ func walkchunk(path string, fi os.FileInfo, dir string, root *FileInfo) error {
return err
}
info.stat = stat
- info.capability, _ = system.Lgetxattr(cpath, "security.capability") // lgetxattr(2): fs access
+ info.capability, err = system.Lgetxattr(cpath, "security.capability") // lgetxattr(2): fs access
+ if err != nil && err != system.EOPNOTSUPP {
+ return err
+ }
+ xattrs, err := system.Llistxattr(cpath)
+ if err != nil && err != system.EOPNOTSUPP {
+ return err
+ }
+ for _, key := range xattrs {
+ if strings.HasPrefix(key, "user.") {
+ value, err := system.Lgetxattr(cpath, key)
+ if err == system.E2BIG {
+ logrus.Errorf("archive: Skipping xattr for file %s since value is too big: %s", cpath, key)
+ continue
+ }
+ if err != nil {
+ return err
+ }
+ if info.xattrs == nil {
+ info.xattrs = make(map[string]string)
+ }
+ info.xattrs[key] = string(value)
+ }
+ }
parent.children[info.name] = info
return nil
}
@@ -307,9 +332,7 @@ func overlayLowerContainsWhiteout(root, path string) (bool, error) {
return false, err
}
if err == nil && stat.Mode()&os.ModeCharDevice != 0 {
- // Check if there's whiteout for the specified item in the specified layer.
- s := stat.Sys().(*syscall.Stat_t)
- if major(s.Rdev) == 0 && minor(s.Rdev) == 0 {
+ if isWhiteOut(stat) {
return true, nil
}
}
@@ -319,8 +342,7 @@ func overlayLowerContainsWhiteout(root, path string) (bool, error) {
func overlayDeletedFile(layers []string, root, path string, fi os.FileInfo) (string, error) {
// If it's a whiteout item, then a file or directory with that name is removed by this layer.
if fi.Mode()&os.ModeCharDevice != 0 {
- s := fi.Sys().(*syscall.Stat_t)
- if major(s.Rdev) == 0 && minor(s.Rdev) == 0 {
+ if isWhiteOut(fi) {
return path, nil
}
}
@@ -350,10 +372,7 @@ func overlayDeletedFile(layers []string, root, path string, fi os.FileInfo) (str
}
if err == nil {
if stat.Mode()&os.ModeCharDevice != 0 {
- // It's a whiteout for this directory, so it can't have been
- // deleted in this layer.
- s := stat.Sys().(*syscall.Stat_t)
- if major(s.Rdev) == 0 && minor(s.Rdev) == 0 {
+ if isWhiteOut(stat) {
return "", nil
}
}
@@ -370,10 +389,7 @@ func overlayDeletedFile(layers []string, root, path string, fi os.FileInfo) (str
}
if err == nil {
if stat.Mode()&os.ModeCharDevice != 0 {
- // If it's whiteout for a parent directory, then the
- // original directory wasn't inherited into the top layer.
- s := stat.Sys().(*syscall.Stat_t)
- if major(s.Rdev) == 0 && minor(s.Rdev) == 0 {
+ if isWhiteOut(stat) {
return "", nil
}
}
diff --git a/vendor/github.com/containers/storage/pkg/archive/copy.go b/vendor/github.com/containers/storage/pkg/archive/copy.go
index ea012b2d9..6298a674d 100644
--- a/vendor/github.com/containers/storage/pkg/archive/copy.go
+++ b/vendor/github.com/containers/storage/pkg/archive/copy.go
@@ -9,7 +9,6 @@ import (
"path/filepath"
"strings"
- "github.com/containers/storage/pkg/system"
"github.com/sirupsen/logrus"
)
@@ -198,7 +197,7 @@ func CopyInfoDestinationPath(path string) (info CopyInfo, err error) {
return CopyInfo{}, err
}
- if !system.IsAbs(linkTarget) {
+ if !filepath.IsAbs(linkTarget) {
// Join with the parent directory.
dstParent, _ := SplitPathDirEntry(path)
linkTarget = filepath.Join(dstParent, linkTarget)
diff --git a/vendor/github.com/containers/storage/pkg/archive/diff.go b/vendor/github.com/containers/storage/pkg/archive/diff.go
index b8ec876dd..3c2601dee 100644
--- a/vendor/github.com/containers/storage/pkg/archive/diff.go
+++ b/vendor/github.com/containers/storage/pkg/archive/diff.go
@@ -84,7 +84,7 @@ func UnpackLayer(dest string, layer io.Reader, options *TarOptions) (size int64,
parentPath := filepath.Join(dest, parent)
if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) {
- err = system.MkdirAll(parentPath, 0600, "")
+ err = os.MkdirAll(parentPath, 0600)
if err != nil {
return 0, err
}
diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/archive_unix.go b/vendor/github.com/containers/storage/pkg/chrootarchive/archive_unix.go
index ca9fb10d7..630826db1 100644
--- a/vendor/github.com/containers/storage/pkg/chrootarchive/archive_unix.go
+++ b/vendor/github.com/containers/storage/pkg/chrootarchive/archive_unix.go
@@ -96,11 +96,13 @@ func invokeUnpack(decompressedArchive io.Reader, dest string, options *archive.T
cmd.Stderr = output
if err := cmd.Start(); err != nil {
+ w.Close()
return fmt.Errorf("Untar error on re-exec cmd: %v", err)
}
//write the options to the pipe for the untar exec to read
if err := json.NewEncoder(w).Encode(options); err != nil {
+ w.Close()
return fmt.Errorf("Untar json encode to pipe failed: %v", err)
}
w.Close()
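
The two added w.Close() calls plug a file-descriptor leak: if Start or the JSON encode fails, the write end of the pipe was previously never closed. A minimal sketch of the pattern; "cat" stands in for the untar re-exec child:

package main

import (
	"fmt"
	"os"
	"os/exec"
)

func main() {
	r, w, err := os.Pipe()
	if err != nil {
		panic(err)
	}
	defer r.Close()

	cmd := exec.Command("cat") // stands in for the re-exec target
	cmd.Stdin = r
	if err := cmd.Start(); err != nil {
		w.Close() // close on the error path too, or the fd leaks
		fmt.Println("start failed:", err)
		return
	}
	// ... write options to w here ...
	w.Close() // normal path: closing signals EOF to the child
	cmd.Wait()
}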
diff --git a/vendor/github.com/containers/storage/pkg/config/config.go b/vendor/github.com/containers/storage/pkg/config/config.go
index 091040140..f3f855c32 100644
--- a/vendor/github.com/containers/storage/pkg/config/config.go
+++ b/vendor/github.com/containers/storage/pkg/config/config.go
@@ -1,5 +1,11 @@
package config
+import (
+ "fmt"
+
+ "github.com/sirupsen/logrus"
+)
+
// ThinpoolOptionsConfig represents the "storage.options.thinpool"
// TOML config table.
type ThinpoolOptionsConfig struct {
@@ -47,6 +53,9 @@ type ThinpoolOptionsConfig struct {
// devices.
MountOpt string `toml:"mountopt"`
+ // Size is used to set a maximum size of the container image
+ Size string `toml:"size"`
+
// UseDeferredDeletion marks device for deferred deletion
UseDeferredDeletion string `toml:"use_deferred_deletion"`
@@ -59,6 +68,47 @@ type ThinpoolOptionsConfig struct {
XfsNoSpaceMaxRetries string `toml:"xfs_nospace_max_retries"`
}
+type AufsOptionsConfig struct {
+ // MountOpt specifies extra mount options used when mounting
+ MountOpt string `toml:"mountopt"`
+}
+
+type BtrfsOptionsConfig struct {
+ // MinSpace is the minimum space allocated to the device
+ MinSpace string `toml:"min_space"`
+ // Size is used to set a maximum size of the container image
+ Size string `toml:"size"`
+}
+
+type OverlayOptionsConfig struct {
+ // IgnoreChownErrors is a flag for whether chown errors should be
+ // ignored when building an image.
+ IgnoreChownErrors string `toml:"ignore_chown_errors"`
+ // MountOpt specifies extra mount options used when mounting
+ MountOpt string `toml:"mountopt"`
+ // Alternative program to use for the mount of the file system
+ MountProgram string `toml:"mount_program"`
+ // Size is used to set a maximum size of the container image
+ Size string `toml:"size"`
+ // Do not create a bind mount on the storage home
+ SkipMountHome string `toml:"skip_mount_home"`
+}
+
+type VfsOptionsConfig struct {
+ // IgnoreChownErrors is a flag for whether chown errors should be
+ // ignored when building an image.
+ IgnoreChownErrors string `toml:"ignore_chown_errors"`
+}
+
+type ZfsOptionsConfig struct {
+ // MountOpt specifies extra mount options used when mounting
+ MountOpt string `toml:"mountopt"`
+ // Name is the name of the ZFS file system
+ Name string `toml:"fsname"`
+ // Size is used to set a maximum size of the container image
+ Size string `toml:"size"`
+}
+
// OptionsConfig represents the "storage.options" TOML config table.
type OptionsConfig struct {
// AdditionalImagesStores is the location of additional read/only
@@ -83,12 +133,156 @@ type OptionsConfig struct {
// RemapGroup is the name of one or more entries in /etc/subgid which
// should be used to set up default GID mappings.
RemapGroup string `toml:"remap-group"`
+
+ // Aufs container options to be handed to aufs drivers
+ Aufs struct{ AufsOptionsConfig } `toml:"aufs"`
+
+ // Btrfs container options to be handed to btrfs drivers
+ Btrfs struct{ BtrfsOptionsConfig } `toml:"btrfs"`
+
// Thinpool container options to be handed to thinpool drivers
Thinpool struct{ ThinpoolOptionsConfig } `toml:"thinpool"`
+ // Overlay container options to be handed to overlay drivers
+ Overlay struct{ OverlayOptionsConfig } `toml:"overlay"`
+
+ // Vfs container options to be handed to VFS drivers
+ Vfs struct{ VfsOptionsConfig } `toml:"vfs"`
+
+ // Zfs container options to be handed to ZFS drivers
+ Zfs struct{ ZfsOptionsConfig } `toml:"zfs"`
+
+ // Do not create a bind mount on the storage home
+ SkipMountHome string `toml:"skip_mount_home"`
+
// Alternative program to use for the mount of the file system
MountProgram string `toml:"mount_program"`
// MountOpt specifies extra mount options used when mounting
MountOpt string `toml:"mountopt"`
}
+
+// GetGraphDriverOptions returns the driver specific options
+func GetGraphDriverOptions(driverName string, options OptionsConfig) []string {
+ var doptions []string
+ switch driverName {
+ case "aufs":
+ if options.Aufs.MountOpt != "" {
+ return append(doptions, fmt.Sprintf("%s.mountopt=%s", driverName, options.Aufs.MountOpt))
+ } else if options.MountOpt != "" {
+ doptions = append(doptions, fmt.Sprintf("%s.mountopt=%s", driverName, options.MountOpt))
+ }
+
+ case "btrfs":
+ if options.Btrfs.MinSpace != "" {
+ return append(doptions, fmt.Sprintf("%s.min_space=%s", driverName, options.Btrfs.MinSpace))
+ }
+ if options.Btrfs.Size != "" {
+ doptions = append(doptions, fmt.Sprintf("%s.size=%s", driverName, options.Btrfs.Size))
+ } else if options.Size != "" {
+ doptions = append(doptions, fmt.Sprintf("%s.size=%s", driverName, options.Size))
+ }
+
+ case "devicemapper":
+ if options.Thinpool.AutoExtendPercent != "" {
+ doptions = append(doptions, fmt.Sprintf("dm.thinp_autoextend_percent=%s", options.Thinpool.AutoExtendPercent))
+ }
+ if options.Thinpool.AutoExtendThreshold != "" {
+ doptions = append(doptions, fmt.Sprintf("dm.thinp_autoextend_threshold=%s", options.Thinpool.AutoExtendThreshold))
+ }
+ if options.Thinpool.BaseSize != "" {
+ doptions = append(doptions, fmt.Sprintf("dm.basesize=%s", options.Thinpool.BaseSize))
+ }
+ if options.Thinpool.BlockSize != "" {
+ doptions = append(doptions, fmt.Sprintf("dm.blocksize=%s", options.Thinpool.BlockSize))
+ }
+ if options.Thinpool.DirectLvmDevice != "" {
+ doptions = append(doptions, fmt.Sprintf("dm.directlvm_device=%s", options.Thinpool.DirectLvmDevice))
+ }
+ if options.Thinpool.DirectLvmDeviceForce != "" {
+ doptions = append(doptions, fmt.Sprintf("dm.directlvm_device_force=%s", options.Thinpool.DirectLvmDeviceForce))
+ }
+ if options.Thinpool.Fs != "" {
+ doptions = append(doptions, fmt.Sprintf("dm.fs=%s", options.Thinpool.Fs))
+ }
+ if options.Thinpool.LogLevel != "" {
+ doptions = append(doptions, fmt.Sprintf("dm.libdm_log_level=%s", options.Thinpool.LogLevel))
+ }
+ if options.Thinpool.MinFreeSpace != "" {
+ doptions = append(doptions, fmt.Sprintf("dm.min_free_space=%s", options.Thinpool.MinFreeSpace))
+ }
+ if options.Thinpool.MkfsArg != "" {
+ doptions = append(doptions, fmt.Sprintf("dm.mkfsarg=%s", options.Thinpool.MkfsArg))
+ }
+ if options.Thinpool.MountOpt != "" {
+ doptions = append(doptions, fmt.Sprintf("%s.mountopt=%s", driverName, options.Thinpool.MountOpt))
+ } else if options.MountOpt != "" {
+ doptions = append(doptions, fmt.Sprintf("%s.mountopt=%s", driverName, options.MountOpt))
+ }
+
+ if options.Thinpool.Size != "" {
+ doptions = append(doptions, fmt.Sprintf("%s.size=%s", driverName, options.Thinpool.Size))
+ } else if options.Size != "" {
+ doptions = append(doptions, fmt.Sprintf("%s.size=%s", driverName, options.Size))
+ }
+
+ if options.Thinpool.UseDeferredDeletion != "" {
+ doptions = append(doptions, fmt.Sprintf("dm.use_deferred_deletion=%s", options.Thinpool.UseDeferredDeletion))
+ }
+ if options.Thinpool.UseDeferredRemoval != "" {
+ doptions = append(doptions, fmt.Sprintf("dm.use_deferred_removal=%s", options.Thinpool.UseDeferredRemoval))
+ }
+ if options.Thinpool.XfsNoSpaceMaxRetries != "" {
+ doptions = append(doptions, fmt.Sprintf("dm.xfs_nospace_max_retries=%s", options.Thinpool.XfsNoSpaceMaxRetries))
+ }
+
+ case "overlay":
+ if options.Overlay.IgnoreChownErrors != "" {
+ doptions = append(doptions, fmt.Sprintf("%s.ignore_chown_errors=%s", driverName, options.Overlay.IgnoreChownErrors))
+ } else if options.IgnoreChownErrors != "" {
+ doptions = append(doptions, fmt.Sprintf("%s.ignore_chown_errors=%s", driverName, options.IgnoreChownErrors))
+ }
+ if options.Overlay.MountProgram != "" {
+ doptions = append(doptions, fmt.Sprintf("%s.mount_program=%s", driverName, options.Overlay.MountProgram))
+ } else if options.MountProgram != "" {
+ doptions = append(doptions, fmt.Sprintf("%s.mount_program=%s", driverName, options.MountProgram))
+ }
+ if options.Overlay.MountOpt != "" {
+ doptions = append(doptions, fmt.Sprintf("%s.mountopt=%s", driverName, options.Overlay.MountOpt))
+ } else if options.MountOpt != "" {
+ doptions = append(doptions, fmt.Sprintf("%s.mountopt=%s", driverName, options.MountOpt))
+ }
+ if options.Overlay.Size != "" {
+ doptions = append(doptions, fmt.Sprintf("%s.size=%s", driverName, options.Overlay.Size))
+ } else if options.Size != "" {
+ doptions = append(doptions, fmt.Sprintf("%s.size=%s", driverName, options.Size))
+ }
+
+ if options.Overlay.SkipMountHome != "" || options.SkipMountHome != "" {
+ logrus.Warn("skip_mount_home option is no longer supported, ignoring option")
+ }
+
+ case "vfs":
+ if options.Vfs.IgnoreChownErrors != "" {
+ doptions = append(doptions, fmt.Sprintf("%s.ignore_chown_errors=%s", driverName, options.Vfs.IgnoreChownErrors))
+ } else if options.IgnoreChownErrors != "" {
+ doptions = append(doptions, fmt.Sprintf("%s.ignore_chown_errors=%s", driverName, options.IgnoreChownErrors))
+ }
+
+ case "zfs":
+ if options.Zfs.Name != "" {
+ doptions = append(doptions, fmt.Sprintf("%s.fsname=%s", driverName, options.Zfs.Name))
+ }
+ if options.Zfs.MountOpt != "" {
+ doptions = append(doptions, fmt.Sprintf("%s.mountopt=%s", driverName, options.Zfs.MountOpt))
+ } else if options.MountOpt != "" {
+ doptions = append(doptions, fmt.Sprintf("%s.mountopt=%s", driverName, options.MountOpt))
+ }
+ if options.Zfs.Size != "" {
+ doptions = append(doptions, fmt.Sprintf("%s.size=%s", driverName, options.Zfs.Size))
+ } else if options.Size != "" {
+ doptions = append(doptions, fmt.Sprintf("%s.size=%s", driverName, options.Size))
+ }
+ }
+ return doptions
+}
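
GetGraphDriverOptions consolidates the per-driver tables with the legacy top-level keys, preferring the driver-specific value when both are set. A small sketch of that precedence; the option values are illustrative:

package main

import (
	"fmt"

	cfg "github.com/containers/storage/pkg/config"
)

func main() {
	var opts cfg.OptionsConfig
	opts.MountOpt = "nodev"                     // legacy top-level key
	opts.Overlay.MountOpt = "nodev,metacopy=on" // driver-specific key wins
	opts.Overlay.Size = "20G"

	fmt.Println(cfg.GetGraphDriverOptions("overlay", opts))
	// [overlay.mountopt=nodev,metacopy=on overlay.size=20G]
}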
diff --git a/vendor/github.com/containers/storage/pkg/fileutils/fileutils.go b/vendor/github.com/containers/storage/pkg/fileutils/fileutils.go
index a129e654e..388eaf9d5 100644
--- a/vendor/github.com/containers/storage/pkg/fileutils/fileutils.go
+++ b/vendor/github.com/containers/storage/pkg/fileutils/fileutils.go
@@ -57,6 +57,7 @@ func NewPatternMatcher(patterns []string) (*PatternMatcher, error) {
return pm, nil
}
+// Deprecated: Please use the `MatchesResult` method instead.
// Matches matches path against all the patterns. Matches is not safe to be
// called concurrently
func (pm *PatternMatcher) Matches(file string) (bool, error) {
@@ -96,6 +97,85 @@ func (pm *PatternMatcher) Matches(file string) (bool, error) {
return matched, nil
}
+// MatchResult gives the aggregated outcome of matching a path against a
+// set of patterns, including how many patterns matched or excluded it.
+type MatchResult struct {
+ isMatched bool
+ matches, excludes uint
+}
+
+// IsMatched returns true if the overall result is a match
+func (m *MatchResult) IsMatched() bool {
+ return m.isMatched
+}
+
+// Matches returns the number of matches of a MatchResult
+func (m *MatchResult) Matches() uint {
+ return m.matches
+}
+
+// Excludes returns the number of excludes of a MatchResult
+func (m *MatchResult) Excludes() uint {
+ return m.excludes
+}
+
+// MatchesResult verifies the provided filepath against all patterns.
+// It returns a `*MatchResult` describing the outcome on success, otherwise
+// an error. This method is not safe to be called concurrently.
+func (pm *PatternMatcher) MatchesResult(file string) (res *MatchResult, err error) {
+ file = filepath.FromSlash(file)
+ parentPath := filepath.Dir(file)
+ parentPathDirs := strings.Split(parentPath, string(os.PathSeparator))
+ res = &MatchResult{false, 0, 0}
+
+ for _, pattern := range pm.patterns {
+ negative := false
+
+ if pattern.exclusion {
+ negative = true
+ }
+
+ match, err := pattern.match(file)
+ if err != nil {
+ return nil, err
+ }
+
+ if !match && parentPath != "." {
+ // Check to see if the pattern matches one of our parent dirs.
+ if len(pattern.dirs) <= len(parentPathDirs) {
+ match, _ = pattern.match(strings.Join(
+ parentPathDirs[:len(pattern.dirs)],
+ string(os.PathSeparator)),
+ )
+ }
+ }
+
+ if match {
+ res.isMatched = !negative
+ if negative {
+ res.excludes++
+ } else {
+ res.matches++
+ }
+ }
+ }
+
+ if res.matches > 0 {
+ logrus.Debugf("Skipping excluded path: %s", file)
+ }
+
+ return res, nil
+}
+
+// IsMatch verifies the provided filepath against all patterns and returns true
+// if it matches. A match is valid if the last match is a positive one.
+// It returns an error on failure and is not safe to be called concurrently.
+func (pm *PatternMatcher) IsMatch(file string) (matched bool, err error) {
+ res, err := pm.MatchesResult(file)
+ if err != nil {
+ return false, err
+ }
+ return res.isMatched, nil
+}
+
// Exclusions returns true if any of the patterns define exclusions
func (pm *PatternMatcher) Exclusions() bool {
return pm.exclusions
@@ -228,7 +308,7 @@ func Matches(file string, patterns []string) (bool, error) {
return false, nil
}
- return pm.Matches(file)
+ return pm.IsMatch(file)
}
// CopyFile copies from src to dst until either EOF is reached
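
The new IsMatch is the drop-in replacement for the deprecated Matches (as used by the archive.go change earlier in this patch), while MatchesResult additionally reports how many patterns matched and excluded a path. A short usage sketch with .dockerignore-style patterns:

package main

import (
	"fmt"

	"github.com/containers/storage/pkg/fileutils"
)

func main() {
	// Exclude everything under vendor/, but keep one file.
	pm, err := fileutils.NewPatternMatcher([]string{"vendor/**", "!vendor/keep.txt"})
	if err != nil {
		panic(err)
	}

	skip, _ := pm.IsMatch("vendor/github.com/foo/bar.go")
	keep, _ := pm.IsMatch("vendor/keep.txt")
	fmt.Println(skip, keep) // true false

	// MatchesResult also exposes the match/exclude counts.
	res, _ := pm.MatchesResult("vendor/keep.txt")
	fmt.Println(res.IsMatched(), res.Matches(), res.Excludes()) // false 1 1
}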
diff --git a/vendor/github.com/containers/storage/pkg/idtools/idtools_unix.go b/vendor/github.com/containers/storage/pkg/idtools/idtools_unix.go
index bdbdf1b50..6b0f55030 100644
--- a/vendor/github.com/containers/storage/pkg/idtools/idtools_unix.go
+++ b/vendor/github.com/containers/storage/pkg/idtools/idtools_unix.go
@@ -49,7 +49,7 @@ func mkdirAs(path string, mode os.FileMode, ownerUID, ownerGID int, mkAll, chown
paths = append(paths, dirPath)
}
}
- if err := system.MkdirAll(path, mode, ""); err != nil && !os.IsExist(err) {
+ if err := os.MkdirAll(path, mode); err != nil && !os.IsExist(err) {
return err
}
} else {
diff --git a/vendor/github.com/containers/storage/pkg/idtools/idtools_windows.go b/vendor/github.com/containers/storage/pkg/idtools/idtools_windows.go
index dbf6bc4c9..9c8508397 100644
--- a/vendor/github.com/containers/storage/pkg/idtools/idtools_windows.go
+++ b/vendor/github.com/containers/storage/pkg/idtools/idtools_windows.go
@@ -4,14 +4,12 @@ package idtools
import (
"os"
-
- "github.com/containers/storage/pkg/system"
)
// Platforms such as Windows do not support the UID/GID concept. So make this
// just a wrapper around system.MkdirAll.
func mkdirAs(path string, mode os.FileMode, ownerUID, ownerGID int, mkAll, chownExisting bool) error {
- if err := system.MkdirAll(path, mode, ""); err != nil && !os.IsExist(err) {
+ if err := os.MkdirAll(path, mode); err != nil && !os.IsExist(err) {
return err
}
return nil
diff --git a/vendor/github.com/containers/storage/pkg/loopback/attach_loopback.go b/vendor/github.com/containers/storage/pkg/loopback/attach_loopback.go
index be8680fbc..e2cf30b41 100644
--- a/vendor/github.com/containers/storage/pkg/loopback/attach_loopback.go
+++ b/vendor/github.com/containers/storage/pkg/loopback/attach_loopback.go
@@ -93,7 +93,7 @@ func openNextAvailableLoopback(index int, sparseName string, sparseFile *os.File
logrus.Errorf("Error getting loopback backing file: %s", err)
return nil, ErrGetLoopbackBackingFile
}
- if dev != st.Dev || ino != st.Ino {
+ if dev != uint64(st.Dev) || ino != st.Ino {
logrus.Errorf("Loopback device and filesystem disagree on device/inode for %q: %#x(%d):%#x(%d) vs %#x(%d):%#x(%d)", sparseName, dev, dev, ino, ino, st.Dev, st.Dev, st.Ino, st.Ino)
}
diff --git a/vendor/github.com/containers/storage/pkg/loopback/loopback.go b/vendor/github.com/containers/storage/pkg/loopback/loopback.go
index 05d537dc8..f4cf2826e 100644
--- a/vendor/github.com/containers/storage/pkg/loopback/loopback.go
+++ b/vendor/github.com/containers/storage/pkg/loopback/loopback.go
@@ -53,7 +53,7 @@ func FindLoopDeviceFor(file *os.File) *os.File {
}
dev, inode, err := getLoopbackBackingFile(file)
- if err == nil && dev == targetDevice && inode == targetInode {
+ if err == nil && dev == uint64(targetDevice) && inode == targetInode {
return file
}
file.Close()
diff --git a/vendor/github.com/containers/storage/pkg/parsers/kernel/kernel_windows.go b/vendor/github.com/containers/storage/pkg/parsers/kernel/kernel_windows.go
index e59867277..3d3829236 100644
--- a/vendor/github.com/containers/storage/pkg/parsers/kernel/kernel_windows.go
+++ b/vendor/github.com/containers/storage/pkg/parsers/kernel/kernel_windows.go
@@ -63,7 +63,7 @@ func GetKernelVersion() (*VersionInfo, error) {
}
KVI.major = int(dwVersion & 0xFF)
- KVI.minor = int((dwVersion & 0XFF00) >> 8)
+ KVI.minor = int((dwVersion & 0xFF00) >> 8)
KVI.build = int((dwVersion & 0xFFFF0000) >> 16)
return KVI, nil
diff --git a/vendor/github.com/containers/storage/pkg/system/filesys.go b/vendor/github.com/containers/storage/pkg/system/filesys.go
deleted file mode 100644
index 102565f76..000000000
--- a/vendor/github.com/containers/storage/pkg/system/filesys.go
+++ /dev/null
@@ -1,67 +0,0 @@
-// +build !windows
-
-package system
-
-import (
- "io/ioutil"
- "os"
- "path/filepath"
-)
-
-// MkdirAllWithACL is a wrapper for MkdirAll on unix systems.
-func MkdirAllWithACL(path string, perm os.FileMode, sddl string) error {
- return MkdirAll(path, perm, sddl)
-}
-
-// MkdirAll creates a directory named path along with any necessary parents,
-// with permission specified by attribute perm for all dir created.
-func MkdirAll(path string, perm os.FileMode, sddl string) error {
- return os.MkdirAll(path, perm)
-}
-
-// IsAbs is a platform-specific wrapper for filepath.IsAbs.
-func IsAbs(path string) bool {
- return filepath.IsAbs(path)
-}
-
-// The functions below here are wrappers for the equivalents in the os and ioutils packages.
-// They are passthrough on Unix platforms, and only relevant on Windows.
-
-// CreateSequential creates the named file with mode 0666 (before umask), truncating
-// it if it already exists. If successful, methods on the returned
-// File can be used for I/O; the associated file descriptor has mode
-// O_RDWR.
-// If there is an error, it will be of type *PathError.
-func CreateSequential(name string) (*os.File, error) {
- return os.Create(name)
-}
-
-// OpenSequential opens the named file for reading. If successful, methods on
-// the returned file can be used for reading; the associated file
-// descriptor has mode O_RDONLY.
-// If there is an error, it will be of type *PathError.
-func OpenSequential(name string) (*os.File, error) {
- return os.Open(name)
-}
-
-// OpenFileSequential is the generalized open call; most users will use Open
-// or Create instead. It opens the named file with specified flag
-// (O_RDONLY etc.) and perm, (0666 etc.) if applicable. If successful,
-// methods on the returned File can be used for I/O.
-// If there is an error, it will be of type *PathError.
-func OpenFileSequential(name string, flag int, perm os.FileMode) (*os.File, error) {
- return os.OpenFile(name, flag, perm)
-}
-
-// TempFileSequential creates a new temporary file in the directory dir
-// with a name beginning with prefix, opens the file for reading
-// and writing, and returns the resulting *os.File.
-// If dir is the empty string, TempFile uses the default directory
-// for temporary files (see os.TempDir).
-// Multiple programs calling TempFile simultaneously
-// will not choose the same file. The caller can use f.Name()
-// to find the pathname of the file. It is the caller's responsibility
-// to remove the file when no longer needed.
-func TempFileSequential(dir, prefix string) (f *os.File, err error) {
- return ioutil.TempFile(dir, prefix)
-}
diff --git a/vendor/github.com/containers/storage/pkg/system/filesys_windows.go b/vendor/github.com/containers/storage/pkg/system/filesys_windows.go
deleted file mode 100644
index a61b53d0b..000000000
--- a/vendor/github.com/containers/storage/pkg/system/filesys_windows.go
+++ /dev/null
@@ -1,298 +0,0 @@
-// +build windows
-
-package system
-
-import (
- "os"
- "path/filepath"
- "regexp"
- "strconv"
- "strings"
- "sync"
- "syscall"
- "time"
- "unsafe"
-
- winio "github.com/Microsoft/go-winio"
- "golang.org/x/sys/windows"
-)
-
-const (
- // SddlAdministratorsLocalSystem is local administrators plus NT AUTHORITY\System
- SddlAdministratorsLocalSystem = "D:P(A;OICI;GA;;;BA)(A;OICI;GA;;;SY)"
- // SddlNtvmAdministratorsLocalSystem is NT VIRTUAL MACHINE\Virtual Machines plus local administrators plus NT AUTHORITY\System
- SddlNtvmAdministratorsLocalSystem = "D:P(A;OICI;GA;;;S-1-5-83-0)(A;OICI;GA;;;BA)(A;OICI;GA;;;SY)"
-)
-
-// MkdirAllWithACL is a wrapper for MkdirAll that creates a directory
-// with an appropriate SDDL defined ACL.
-func MkdirAllWithACL(path string, perm os.FileMode, sddl string) error {
- return mkdirall(path, true, sddl)
-}
-
-// MkdirAll implementation that is volume path aware for Windows.
-func MkdirAll(path string, _ os.FileMode, sddl string) error {
- return mkdirall(path, false, sddl)
-}
-
-// mkdirall is a custom version of os.MkdirAll modified for use on Windows
-// so that it is both volume path aware, and can create a directory with
-// a DACL.
-func mkdirall(path string, applyACL bool, sddl string) error {
- if re := regexp.MustCompile(`^\\\\\?\\Volume{[a-z0-9-]+}$`); re.MatchString(path) {
- return nil
- }
-
- // The rest of this method is largely copied from os.MkdirAll and should be kept
- // as-is to ensure compatibility.
-
- // Fast path: if we can tell whether path is a directory or file, stop with success or error.
- dir, err := os.Stat(path)
- if err == nil {
- if dir.IsDir() {
- return nil
- }
- return &os.PathError{
- Op: "mkdir",
- Path: path,
- Err: syscall.ENOTDIR,
- }
- }
-
- // Slow path: make sure parent exists and then call Mkdir for path.
- i := len(path)
- for i > 0 && os.IsPathSeparator(path[i-1]) { // Skip trailing path separator.
- i--
- }
-
- j := i
- for j > 0 && !os.IsPathSeparator(path[j-1]) { // Scan backward over element.
- j--
- }
-
- if j > 1 {
- // Create parent
- err = mkdirall(path[0:j-1], false, sddl)
- if err != nil {
- return err
- }
- }
-
- // Parent now exists; invoke os.Mkdir or mkdirWithACL and use its result.
- if applyACL {
- err = mkdirWithACL(path, sddl)
- } else {
- err = os.Mkdir(path, 0)
- }
-
- if err != nil {
- // Handle arguments like "foo/." by
- // double-checking that directory doesn't exist.
- dir, err1 := os.Lstat(path)
- if err1 == nil && dir.IsDir() {
- return nil
- }
- return err
- }
- return nil
-}
-
-// mkdirWithACL creates a new directory. If there is an error, it will be of
-// type *PathError. .
-//
-// This is a modified and combined version of os.Mkdir and windows.Mkdir
-// in golang to cater for creating a directory am ACL permitting full
-// access, with inheritance, to any subfolder/file for Built-in Administrators
-// and Local System.
-func mkdirWithACL(name string, sddl string) error {
- sa := windows.SecurityAttributes{Length: 0}
- sd, err := winio.SddlToSecurityDescriptor(sddl)
- if err != nil {
- return &os.PathError{Op: "mkdir", Path: name, Err: err}
- }
- sa.Length = uint32(unsafe.Sizeof(sa))
- sa.InheritHandle = 1
- sa.SecurityDescriptor = uintptr(unsafe.Pointer(&sd[0]))
-
- namep, err := windows.UTF16PtrFromString(name)
- if err != nil {
- return &os.PathError{Op: "mkdir", Path: name, Err: err}
- }
-
- e := windows.CreateDirectory(namep, &sa)
- if e != nil {
- return &os.PathError{Op: "mkdir", Path: name, Err: e}
- }
- return nil
-}
-
-// IsAbs is a platform-specific wrapper for filepath.IsAbs. On Windows,
-// golang filepath.IsAbs does not consider a path \windows\system32 as absolute
-// as it doesn't start with a drive-letter/colon combination. However, in
-// docker we need to verify things such as WORKDIR /windows/system32 in
-// a Dockerfile (which gets translated to \windows\system32 when being processed
-// by the daemon. This SHOULD be treated as absolute from a docker processing
-// perspective.
-func IsAbs(path string) bool {
- if !filepath.IsAbs(path) {
- if !strings.HasPrefix(path, string(os.PathSeparator)) {
- return false
- }
- }
- return true
-}
-
-// The origin of the functions below here are the golang OS and windows packages,
-// slightly modified to only cope with files, not directories due to the
-// specific use case.
-//
-// The alteration is to allow a file on Windows to be opened with
-// FILE_FLAG_SEQUENTIAL_SCAN (particular for docker load), to avoid eating
-// the standby list, particularly when accessing large files such as layer.tar.
-
-// CreateSequential creates the named file with mode 0666 (before umask), truncating
-// it if it already exists. If successful, methods on the returned
-// File can be used for I/O; the associated file descriptor has mode
-// O_RDWR.
-// If there is an error, it will be of type *PathError.
-func CreateSequential(name string) (*os.File, error) {
- return OpenFileSequential(name, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0)
-}
-
-// OpenSequential opens the named file for reading. If successful, methods on
-// the returned file can be used for reading; the associated file
-// descriptor has mode O_RDONLY.
-// If there is an error, it will be of type *PathError.
-func OpenSequential(name string) (*os.File, error) {
- return OpenFileSequential(name, os.O_RDONLY, 0)
-}
-
-// OpenFileSequential is the generalized open call; most users will use Open
-// or Create instead.
-// If there is an error, it will be of type *PathError.
-func OpenFileSequential(name string, flag int, _ os.FileMode) (*os.File, error) {
- if name == "" {
- return nil, &os.PathError{Op: "open", Path: name, Err: syscall.ENOENT}
- }
- r, errf := windowsOpenFileSequential(name, flag, 0)
- if errf == nil {
- return r, nil
- }
- return nil, &os.PathError{Op: "open", Path: name, Err: errf}
-}
-
-func windowsOpenFileSequential(name string, flag int, _ os.FileMode) (file *os.File, err error) {
- r, e := windowsOpenSequential(name, flag|windows.O_CLOEXEC, 0)
- if e != nil {
- return nil, e
- }
- return os.NewFile(uintptr(r), name), nil
-}
-
-func makeInheritSa() *windows.SecurityAttributes {
- var sa windows.SecurityAttributes
- sa.Length = uint32(unsafe.Sizeof(sa))
- sa.InheritHandle = 1
- return &sa
-}
-
-func windowsOpenSequential(path string, mode int, _ uint32) (fd windows.Handle, err error) {
- if len(path) == 0 {
- return windows.InvalidHandle, windows.ERROR_FILE_NOT_FOUND
- }
- pathp, err := windows.UTF16PtrFromString(path)
- if err != nil {
- return windows.InvalidHandle, err
- }
- var access uint32
- switch mode & (windows.O_RDONLY | windows.O_WRONLY | windows.O_RDWR) {
- case windows.O_RDONLY:
- access = windows.GENERIC_READ
- case windows.O_WRONLY:
- access = windows.GENERIC_WRITE
- case windows.O_RDWR:
- access = windows.GENERIC_READ | windows.GENERIC_WRITE
- }
- if mode&windows.O_CREAT != 0 {
- access |= windows.GENERIC_WRITE
- }
- if mode&windows.O_APPEND != 0 {
- access &^= windows.GENERIC_WRITE
- access |= windows.FILE_APPEND_DATA
- }
- sharemode := uint32(windows.FILE_SHARE_READ | windows.FILE_SHARE_WRITE)
- var sa *windows.SecurityAttributes
- if mode&windows.O_CLOEXEC == 0 {
- sa = makeInheritSa()
- }
- var createmode uint32
- switch {
- case mode&(windows.O_CREAT|windows.O_EXCL) == (windows.O_CREAT | windows.O_EXCL):
- createmode = windows.CREATE_NEW
- case mode&(windows.O_CREAT|windows.O_TRUNC) == (windows.O_CREAT | windows.O_TRUNC):
- createmode = windows.CREATE_ALWAYS
- case mode&windows.O_CREAT == windows.O_CREAT:
- createmode = windows.OPEN_ALWAYS
- case mode&windows.O_TRUNC == windows.O_TRUNC:
- createmode = windows.TRUNCATE_EXISTING
- default:
- createmode = windows.OPEN_EXISTING
- }
- // Use FILE_FLAG_SEQUENTIAL_SCAN rather than FILE_ATTRIBUTE_NORMAL as implemented in golang.
- //https://msdn.microsoft.com/en-us/library/windows/desktop/aa363858(v=vs.85).aspx
- const fileFlagSequentialScan = 0x08000000 // FILE_FLAG_SEQUENTIAL_SCAN
- h, e := windows.CreateFile(pathp, access, sharemode, sa, createmode, fileFlagSequentialScan, 0)
- return h, e
-}
-
-// Helpers for TempFileSequential
-var rand uint32
-var randmu sync.Mutex
-
-func reseed() uint32 {
- return uint32(time.Now().UnixNano() + int64(os.Getpid()))
-}
-func nextSuffix() string {
- randmu.Lock()
- r := rand
- if r == 0 {
- r = reseed()
- }
- r = r*1664525 + 1013904223 // constants from Numerical Recipes
- rand = r
- randmu.Unlock()
- return strconv.Itoa(int(1e9 + r%1e9))[1:]
-}
-
-// TempFileSequential is a copy of ioutil.TempFile, modified to use sequential
-// file access. Below is the original comment from golang:
-// TempFile creates a new temporary file in the directory dir
-// with a name beginning with prefix, opens the file for reading
-// and writing, and returns the resulting *os.File.
-// If dir is the empty string, TempFile uses the default directory
-// for temporary files (see os.TempDir).
-// Multiple programs calling TempFile simultaneously
-// will not choose the same file. The caller can use f.Name()
-// to find the pathname of the file. It is the caller's responsibility
-// to remove the file when no longer needed.
-func TempFileSequential(dir, prefix string) (f *os.File, err error) {
- if dir == "" {
- dir = os.TempDir()
- }
-
- nconflict := 0
- for i := 0; i < 10000; i++ {
- name := filepath.Join(dir, prefix+nextSuffix())
- f, err = OpenFileSequential(name, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0600)
- if os.IsExist(err) {
- if nconflict++; nconflict > 10 {
- randmu.Lock()
- rand = reseed()
- randmu.Unlock()
- }
- continue
- }
- break
- }
- return
-}
diff --git a/vendor/github.com/containers/storage/pkg/system/stat_linux.go b/vendor/github.com/containers/storage/pkg/system/stat_linux.go
index 1939f9518..af7af20fa 100644
--- a/vendor/github.com/containers/storage/pkg/system/stat_linux.go
+++ b/vendor/github.com/containers/storage/pkg/system/stat_linux.go
@@ -8,7 +8,7 @@ func fromStatT(s *syscall.Stat_t) (*StatT, error) {
mode: s.Mode,
uid: s.Uid,
gid: s.Gid,
- rdev: s.Rdev,
+ rdev: uint64(s.Rdev),
mtim: s.Mtim}, nil
}
diff --git a/vendor/github.com/containers/storage/pkg/system/xattrs_linux.go b/vendor/github.com/containers/storage/pkg/system/xattrs_linux.go
index 98b111be4..24c3f37ef 100644
--- a/vendor/github.com/containers/storage/pkg/system/xattrs_linux.go
+++ b/vendor/github.com/containers/storage/pkg/system/xattrs_linux.go
@@ -1,24 +1,46 @@
package system
-import "golang.org/x/sys/unix"
+import (
+ "bytes"
+ "syscall"
+
+ "golang.org/x/sys/unix"
+)
+
+const (
+ // Value is larger than the maximum size allowed
+ E2BIG syscall.Errno = unix.E2BIG
+
+ // Operation not supported
+ EOPNOTSUPP syscall.Errno = unix.EOPNOTSUPP
+)
// Lgetxattr retrieves the value of the extended attribute identified by attr
// and associated with the given path in the file system.
// It returns a nil slice and nil error if the xattr is not set.
func Lgetxattr(path string, attr string) ([]byte, error) {
+ // Start with a 128-byte buffer
dest := make([]byte, 128)
sz, errno := unix.Lgetxattr(path, attr, dest)
- if errno == unix.ENODATA {
+
+ switch {
+ case errno == unix.ENODATA:
return nil, nil
- }
- if errno == unix.ERANGE {
+ case errno == unix.ERANGE:
+ // The 128-byte buffer may be too small. Probe with an empty buffer
+ // to learn the real size of the xattr value on disk
+ sz, errno = unix.Lgetxattr(path, attr, []byte{})
+ if errno != nil {
+ return nil, errno
+ }
dest = make([]byte, sz)
sz, errno = unix.Lgetxattr(path, attr, dest)
- }
- if errno != nil {
+ if errno != nil {
+ return nil, errno
+ }
+ case errno != nil:
return nil, errno
}
-
return dest[:sz], nil
}
@@ -27,3 +49,32 @@ func Lgetxattr(path string, attr string) ([]byte, error) {
func Lsetxattr(path string, attr string, data []byte, flags int) error {
return unix.Lsetxattr(path, attr, data, flags)
}
+
+// Llistxattr lists extended attributes associated with the given path
+// in the file system.
+func Llistxattr(path string) ([]string, error) {
+ var dest []byte
+
+ for {
+ sz, err := unix.Llistxattr(path, dest)
+ if err != nil {
+ return nil, err
+ }
+
+ if sz > len(dest) {
+ dest = make([]byte, sz)
+ } else {
+ dest = dest[:sz]
+ break
+ }
+ }
+
+ var attrs []string
+ for _, token := range bytes.Split(dest, []byte{0}) {
+ if len(token) > 0 {
+ attrs = append(attrs, string(token))
+ }
+ }
+
+ return attrs, nil
+}
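
Llistxattr follows the usual two-pass sizing dance: probe, grow the buffer to the reported size, then split the NUL-separated name list. Together with the new E2BIG/EOPNOTSUPP constants it lets callers enumerate xattrs defensively; a minimal sketch, where the path is an assumption:

package main

import (
	"fmt"

	"github.com/containers/storage/pkg/system"
)

func main() {
	const path = "/tmp/some-file" // assumed to exist, possibly with xattrs set
	attrs, err := system.Llistxattr(path)
	if err != nil && err != system.EOPNOTSUPP {
		panic(err)
	}
	for _, attr := range attrs {
		value, err := system.Lgetxattr(path, attr)
		if err == system.E2BIG {
			continue // value too large to read; mirror the archiver's skip
		}
		if err != nil {
			panic(err)
		}
		fmt.Printf("%s=%q\n", attr, value)
	}
}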
diff --git a/vendor/github.com/containers/storage/pkg/system/xattrs_unsupported.go b/vendor/github.com/containers/storage/pkg/system/xattrs_unsupported.go
index 0114f2227..bc8b8e3a5 100644
--- a/vendor/github.com/containers/storage/pkg/system/xattrs_unsupported.go
+++ b/vendor/github.com/containers/storage/pkg/system/xattrs_unsupported.go
@@ -2,6 +2,16 @@
package system
+import "syscall"
+
+const (
+ // Value is larger than the maximum size allowed
+ E2BIG syscall.Errno = syscall.Errno(0)
+
+ // Operation not supported
+ EOPNOTSUPP syscall.Errno = syscall.Errno(0)
+)
+
// Lgetxattr is not supported on platforms other than linux.
func Lgetxattr(path string, attr string) ([]byte, error) {
return nil, ErrNotSupportedPlatform
@@ -11,3 +21,8 @@ func Lgetxattr(path string, attr string) ([]byte, error) {
func Lsetxattr(path string, attr string, data []byte, flags int) error {
return ErrNotSupportedPlatform
}
+
+// Llistxattr is not supported on platforms other than linux.
+func Llistxattr(path string) ([]string, error) {
+ return nil, ErrNotSupportedPlatform
+}
diff --git a/vendor/github.com/containers/storage/storage.conf b/vendor/github.com/containers/storage/storage.conf
index efd46eefb..b7b73ed38 100644
--- a/vendor/github.com/containers/storage/storage.conf
+++ b/vendor/github.com/containers/storage/storage.conf
@@ -21,25 +21,6 @@ graphroot = "/var/lib/containers/storage"
additionalimagestores = [
]
-# Size is used to set a maximum size of the container image. Only supported by
-# certain container storage drivers.
-size = ""
-
-# Path to an helper program to use for mounting the file system instead of mounting it
-# directly.
-#mount_program = "/usr/bin/fuse-overlayfs"
-
-# mountopt specifies comma separated list of extra mount options
-mountopt = "nodev"
-
-# ignore_chown_errors can be set to allow a non privileged user running with
-# a single UID within a user namespace to run containers. The user can pull
-# and use any image even those with multiple uids. Note multiple UIDs will be
-# squasheddown to the default uid in the container. These images will have no
-# separation between the users in the container. Only supported for the overlay
-# and vfs drivers.
-#ignore_chown_errors = false
-
# Remap-UIDs/GIDs is the mapping from UIDs/GIDs as they should appear inside of
# a container, to the UIDs/GIDs as they should appear outside of the container,
# and the length of the range of UIDs/GIDs. Additional mapped sets can be
@@ -61,6 +42,25 @@ mountopt = "nodev"
# remap-user = "storage"
# remap-group = "storage"
+[storage.options.overlay]
+# ignore_chown_errors can be set to allow a non-privileged user running with
+# a single UID within a user namespace to run containers. The user can pull
+# and use any image, even those with multiple UIDs. Note that multiple UIDs
+# will be squashed down to the default UID in the container. These images
+# will have no separation between the users in the container. Only supported
+# for the overlay and vfs drivers.
+#ignore_chown_errors = false
+
+# Path to a helper program to use for mounting the file system instead of
+# mounting it directly.
+#mount_program = "/usr/bin/fuse-overlayfs"
+
+# mountopt specifies a comma-separated list of extra mount options
+mountopt = "nodev"
+
+# Size is used to set a maximum size of the container image.
+# size = ""
+
[storage.options.thinpool]
# Storage Options for thinpool
@@ -111,6 +111,9 @@ mountopt = "nodev"
# device.
# mkfsarg = ""
+# Size is used to set a maximum size of the container image.
+# size = ""
+
# use_deferred_removal marks devicemapper block device for deferred removal.
# If the thinpool is in use when the driver attempts to remove it, the driver
# tells the kernel to remove it as soon as possible. Note this does not free
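
With the options moved into per-driver tables, a storage.conf fragment decodes straight into the new config structs and feeds GetGraphDriverOptions. A hedged end-to-end sketch; the local conf type mirrors only the relevant slice of store.go's tomlConfig:

package main

import (
	"fmt"

	"github.com/BurntSushi/toml"
	cfg "github.com/containers/storage/pkg/config"
)

// conf mirrors the relevant part of storage's tomlConfig.
type conf struct {
	Storage struct {
		Driver  string            `toml:"driver"`
		Options cfg.OptionsConfig `toml:"options"`
	} `toml:"storage"`
}

const data = `
[storage]
driver = "overlay"

[storage.options.overlay]
mountopt = "nodev"
mount_program = "/usr/bin/fuse-overlayfs"
`

func main() {
	var c conf
	if _, err := toml.Decode(data, &c); err != nil {
		panic(err)
	}
	fmt.Println(cfg.GetGraphDriverOptions(c.Storage.Driver, c.Storage.Options))
	// [overlay.mount_program=/usr/bin/fuse-overlayfs overlay.mountopt=nodev]
}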
diff --git a/vendor/github.com/containers/storage/store.go b/vendor/github.com/containers/storage/store.go
index 6e4bd4ee0..654e86880 100644
--- a/vendor/github.com/containers/storage/store.go
+++ b/vendor/github.com/containers/storage/store.go
@@ -18,7 +18,7 @@ import (
"github.com/BurntSushi/toml"
drivers "github.com/containers/storage/drivers"
"github.com/containers/storage/pkg/archive"
- "github.com/containers/storage/pkg/config"
+ cfg "github.com/containers/storage/pkg/config"
"github.com/containers/storage/pkg/directory"
"github.com/containers/storage/pkg/idtools"
"github.com/containers/storage/pkg/ioutils"
@@ -3274,10 +3274,10 @@ func DefaultConfigFile(rootless bool) (string, error) {
// TOML-friendly explicit tables used for conversions.
type tomlConfig struct {
Storage struct {
- Driver string `toml:"driver"`
- RunRoot string `toml:"runroot"`
- GraphRoot string `toml:"graphroot"`
- Options struct{ config.OptionsConfig } `toml:"options"`
+ Driver string `toml:"driver"`
+ RunRoot string `toml:"runroot"`
+ GraphRoot string `toml:"graphroot"`
+ Options cfg.OptionsConfig `toml:"options"`
} `toml:"storage"`
}
@@ -3307,50 +3307,6 @@ func ReloadConfigurationFile(configFile string, storeOptions *StoreOptions) {
if config.Storage.GraphRoot != "" {
storeOptions.GraphRoot = config.Storage.GraphRoot
}
- if config.Storage.Options.Thinpool.AutoExtendPercent != "" {
- storeOptions.GraphDriverOptions = append(storeOptions.GraphDriverOptions, fmt.Sprintf("dm.thinp_autoextend_percent=%s", config.Storage.Options.Thinpool.AutoExtendPercent))
- }
-
- if config.Storage.Options.Thinpool.AutoExtendThreshold != "" {
- storeOptions.GraphDriverOptions = append(storeOptions.GraphDriverOptions, fmt.Sprintf("dm.thinp_autoextend_threshold=%s", config.Storage.Options.Thinpool.AutoExtendThreshold))
- }
-
- if config.Storage.Options.Thinpool.BaseSize != "" {
- storeOptions.GraphDriverOptions = append(storeOptions.GraphDriverOptions, fmt.Sprintf("dm.basesize=%s", config.Storage.Options.Thinpool.BaseSize))
- }
- if config.Storage.Options.Thinpool.BlockSize != "" {
- storeOptions.GraphDriverOptions = append(storeOptions.GraphDriverOptions, fmt.Sprintf("dm.blocksize=%s", config.Storage.Options.Thinpool.BlockSize))
- }
- if config.Storage.Options.Thinpool.DirectLvmDevice != "" {
- storeOptions.GraphDriverOptions = append(storeOptions.GraphDriverOptions, fmt.Sprintf("dm.directlvm_device=%s", config.Storage.Options.Thinpool.DirectLvmDevice))
- }
- if config.Storage.Options.Thinpool.DirectLvmDeviceForce != "" {
- storeOptions.GraphDriverOptions = append(storeOptions.GraphDriverOptions, fmt.Sprintf("dm.directlvm_device_force=%s", config.Storage.Options.Thinpool.DirectLvmDeviceForce))
- }
- if config.Storage.Options.Thinpool.Fs != "" {
- storeOptions.GraphDriverOptions = append(storeOptions.GraphDriverOptions, fmt.Sprintf("dm.fs=%s", config.Storage.Options.Thinpool.Fs))
- }
- if config.Storage.Options.Thinpool.LogLevel != "" {
- storeOptions.GraphDriverOptions = append(storeOptions.GraphDriverOptions, fmt.Sprintf("dm.libdm_log_level=%s", config.Storage.Options.Thinpool.LogLevel))
- }
- if config.Storage.Options.Thinpool.MinFreeSpace != "" {
- storeOptions.GraphDriverOptions = append(storeOptions.GraphDriverOptions, fmt.Sprintf("dm.min_free_space=%s", config.Storage.Options.Thinpool.MinFreeSpace))
- }
- if config.Storage.Options.Thinpool.MkfsArg != "" {
- storeOptions.GraphDriverOptions = append(storeOptions.GraphDriverOptions, fmt.Sprintf("dm.mkfsarg=%s", config.Storage.Options.Thinpool.MkfsArg))
- }
- if config.Storage.Options.Thinpool.MountOpt != "" {
- storeOptions.GraphDriverOptions = append(storeOptions.GraphDriverOptions, fmt.Sprintf("%s.mountopt=%s", config.Storage.Driver, config.Storage.Options.Thinpool.MountOpt))
- }
- if config.Storage.Options.Thinpool.UseDeferredDeletion != "" {
- storeOptions.GraphDriverOptions = append(storeOptions.GraphDriverOptions, fmt.Sprintf("dm.use_deferred_deletion=%s", config.Storage.Options.Thinpool.UseDeferredDeletion))
- }
- if config.Storage.Options.Thinpool.UseDeferredRemoval != "" {
- storeOptions.GraphDriverOptions = append(storeOptions.GraphDriverOptions, fmt.Sprintf("dm.use_deferred_removal=%s", config.Storage.Options.Thinpool.UseDeferredRemoval))
- }
- if config.Storage.Options.Thinpool.XfsNoSpaceMaxRetries != "" {
- storeOptions.GraphDriverOptions = append(storeOptions.GraphDriverOptions, fmt.Sprintf("dm.xfs_nospace_max_retries=%s", config.Storage.Options.Thinpool.XfsNoSpaceMaxRetries))
- }
for _, s := range config.Storage.Options.AdditionalImageStores {
storeOptions.GraphDriverOptions = append(storeOptions.GraphDriverOptions, fmt.Sprintf("%s.imagestore=%s", config.Storage.Driver, s))
}
@@ -3397,6 +3353,9 @@ func ReloadConfigurationFile(configFile string, storeOptions *StoreOptions) {
if os.Getenv("STORAGE_DRIVER") != "" {
storeOptions.GraphDriverName = os.Getenv("STORAGE_DRIVER")
}
+
+ storeOptions.GraphDriverOptions = cfg.GetGraphDriverOptions(storeOptions.GraphDriverName, config.Storage.Options)
+
if os.Getenv("STORAGE_OPTS") != "" {
storeOptions.GraphDriverOptions = append(storeOptions.GraphDriverOptions, strings.Split(os.Getenv("STORAGE_OPTS"), ",")...)
}
diff --git a/vendor/github.com/containers/storage/utils.go b/vendor/github.com/containers/storage/utils.go
index 54627731a..5aa7c0851 100644
--- a/vendor/github.com/containers/storage/utils.go
+++ b/vendor/github.com/containers/storage/utils.go
@@ -70,6 +70,18 @@ func ParseIDMapping(UIDMapSlice, GIDMapSlice []string, subUIDMap, subGIDMap stri
// GetRootlessRuntimeDir returns the runtime directory when running as non root
func GetRootlessRuntimeDir(rootlessUid int) (string, error) {
+ path, err := getRootlessRuntimeDir(rootlessUid)
+ if err != nil {
+ return "", err
+ }
+ path = filepath.Join(path, "containers")
+ if err := os.MkdirAll(path, 0700); err != nil {
+ return "", errors.Wrapf(err, "unable to make rootless runtime dir %s", path)
+ }
+ return path, nil
+}
+
+func getRootlessRuntimeDir(rootlessUid int) (string, error) {
runtimeDir := os.Getenv("XDG_RUNTIME_DIR")
if runtimeDir != "" {
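
GetRootlessRuntimeDir now appends a containers subdirectory to the resolved runtime dir and creates it with mode 0700 before returning. A quick usage sketch; the UID and resulting path are illustrative:

package main

import (
	"fmt"

	"github.com/containers/storage"
)

func main() {
	// With XDG_RUNTIME_DIR=/run/user/1000 this is expected to return
	// /run/user/1000/containers, creating the directory if needed.
	dir, err := storage.GetRootlessRuntimeDir(1000)
	if err != nil {
		panic(err)
	}
	fmt.Println(dir)
}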
diff --git a/vendor/github.com/klauspost/compress/flate/crc32_amd64.go b/vendor/github.com/klauspost/compress/flate/crc32_amd64.go
deleted file mode 100644
index 8298d309a..000000000
--- a/vendor/github.com/klauspost/compress/flate/crc32_amd64.go
+++ /dev/null
@@ -1,42 +0,0 @@
-//+build !noasm
-//+build !appengine
-//+build !gccgo
-
-// Copyright 2015, Klaus Post, see LICENSE for details.
-
-package flate
-
-import (
- "github.com/klauspost/cpuid"
-)
-
-// crc32sse returns a hash for the first 4 bytes of the slice
-// len(a) must be >= 4.
-//go:noescape
-func crc32sse(a []byte) uint32
-
-// crc32sseAll calculates hashes for each 4-byte set in a.
-// dst must be at least len(a) - 4 in size.
-// The size is not checked by the assembly.
-//go:noescape
-func crc32sseAll(a []byte, dst []uint32)
-
-// matchLenSSE4 returns the number of matching bytes in a and b
-// up to length 'max'. Both slices must be at least 'max'
-// bytes in size.
-//
-// TODO: drop the "SSE4" name, since it doesn't use any SSE instructions.
-//
-//go:noescape
-func matchLenSSE4(a, b []byte, max int) int
-
-// histogram accumulates a histogram of b in h.
-// h must be at least 256 entries in length,
-// and must be cleared before calling this function.
-//go:noescape
-func histogram(b []byte, h []int32)
-
-// Detect SSE 4.2 feature.
-func init() {
- useSSE42 = cpuid.CPU.SSE42()
-}
diff --git a/vendor/github.com/klauspost/compress/flate/crc32_amd64.s b/vendor/github.com/klauspost/compress/flate/crc32_amd64.s
deleted file mode 100644
index a79943727..000000000
--- a/vendor/github.com/klauspost/compress/flate/crc32_amd64.s
+++ /dev/null
@@ -1,214 +0,0 @@
-//+build !noasm
-//+build !appengine
-//+build !gccgo
-
-// Copyright 2015, Klaus Post, see LICENSE for details.
-
-// func crc32sse(a []byte) uint32
-TEXT ·crc32sse(SB), 4, $0
- MOVQ a+0(FP), R10
- XORQ BX, BX
-
- // CRC32 dword (R10), EBX
- BYTE $0xF2; BYTE $0x41; BYTE $0x0f
- BYTE $0x38; BYTE $0xf1; BYTE $0x1a
-
- MOVL BX, ret+24(FP)
- RET
-
-// func crc32sseAll(a []byte, dst []uint32)
-TEXT ·crc32sseAll(SB), 4, $0
- MOVQ a+0(FP), R8 // R8: src
- MOVQ a_len+8(FP), R10 // input length
- MOVQ dst+24(FP), R9 // R9: dst
- SUBQ $4, R10
- JS end
- JZ one_crc
- MOVQ R10, R13
- SHRQ $2, R10 // len/4
- ANDQ $3, R13 // len&3
- XORQ BX, BX
- ADDQ $1, R13
- TESTQ R10, R10
- JZ rem_loop
-
-crc_loop:
- MOVQ (R8), R11
- XORQ BX, BX
- XORQ DX, DX
- XORQ DI, DI
- MOVQ R11, R12
- SHRQ $8, R11
- MOVQ R12, AX
- MOVQ R11, CX
- SHRQ $16, R12
- SHRQ $16, R11
- MOVQ R12, SI
-
- // CRC32 EAX, EBX
- BYTE $0xF2; BYTE $0x0f
- BYTE $0x38; BYTE $0xf1; BYTE $0xd8
-
- // CRC32 ECX, EDX
- BYTE $0xF2; BYTE $0x0f
- BYTE $0x38; BYTE $0xf1; BYTE $0xd1
-
- // CRC32 ESI, EDI
- BYTE $0xF2; BYTE $0x0f
- BYTE $0x38; BYTE $0xf1; BYTE $0xfe
- MOVL BX, (R9)
- MOVL DX, 4(R9)
- MOVL DI, 8(R9)
-
- XORQ BX, BX
- MOVL R11, AX
-
- // CRC32 EAX, EBX
- BYTE $0xF2; BYTE $0x0f
- BYTE $0x38; BYTE $0xf1; BYTE $0xd8
- MOVL BX, 12(R9)
-
- ADDQ $16, R9
- ADDQ $4, R8
- XORQ BX, BX
- SUBQ $1, R10
- JNZ crc_loop
-
-rem_loop:
- MOVL (R8), AX
-
- // CRC32 EAX, EBX
- BYTE $0xF2; BYTE $0x0f
- BYTE $0x38; BYTE $0xf1; BYTE $0xd8
-
- MOVL BX, (R9)
- ADDQ $4, R9
- ADDQ $1, R8
- XORQ BX, BX
- SUBQ $1, R13
- JNZ rem_loop
-
-end:
- RET
-
-one_crc:
- MOVQ $1, R13
- XORQ BX, BX
- JMP rem_loop
-
-// func matchLenSSE4(a, b []byte, max int) int
-TEXT ·matchLenSSE4(SB), 4, $0
- MOVQ a_base+0(FP), SI
- MOVQ b_base+24(FP), DI
- MOVQ DI, DX
- MOVQ max+48(FP), CX
-
-cmp8:
- // As long as we are 8 or more bytes before the end of max, we can load and
- // compare 8 bytes at a time. If those 8 bytes are equal, repeat.
- CMPQ CX, $8
- JLT cmp1
- MOVQ (SI), AX
- MOVQ (DI), BX
- CMPQ AX, BX
- JNE bsf
- ADDQ $8, SI
- ADDQ $8, DI
- SUBQ $8, CX
- JMP cmp8
-
-bsf:
- // If those 8 bytes were not equal, XOR the two 8 byte values, and return
- // the index of the first byte that differs. The BSF instruction finds the
- // least significant 1 bit, the amd64 architecture is little-endian, and
- // the shift by 3 converts a bit index to a byte index.
- XORQ AX, BX
- BSFQ BX, BX
- SHRQ $3, BX
- ADDQ BX, DI
-
- // Subtract off &b[0] to convert from &b[ret] to ret, and return.
- SUBQ DX, DI
- MOVQ DI, ret+56(FP)
- RET
-
-cmp1:
- // In the slices' tail, compare 1 byte at a time.
- CMPQ CX, $0
- JEQ matchLenEnd
- MOVB (SI), AX
- MOVB (DI), BX
- CMPB AX, BX
- JNE matchLenEnd
- ADDQ $1, SI
- ADDQ $1, DI
- SUBQ $1, CX
- JMP cmp1
-
-matchLenEnd:
- // Subtract off &b[0] to convert from &b[ret] to ret, and return.
- SUBQ DX, DI
- MOVQ DI, ret+56(FP)
- RET
-
-// func histogram(b []byte, h []int32)
-TEXT ·histogram(SB), 4, $0
- MOVQ b+0(FP), SI // SI: &b
- MOVQ b_len+8(FP), R9 // R9: len(b)
- MOVQ h+24(FP), DI // DI: Histogram
- MOVQ R9, R8
- SHRQ $3, R8
- JZ hist1
- XORQ R11, R11
-
-loop_hist8:
- MOVQ (SI), R10
-
- MOVB R10, R11
- INCL (DI)(R11*4)
- SHRQ $8, R10
-
- MOVB R10, R11
- INCL (DI)(R11*4)
- SHRQ $8, R10
-
- MOVB R10, R11
- INCL (DI)(R11*4)
- SHRQ $8, R10
-
- MOVB R10, R11
- INCL (DI)(R11*4)
- SHRQ $8, R10
-
- MOVB R10, R11
- INCL (DI)(R11*4)
- SHRQ $8, R10
-
- MOVB R10, R11
- INCL (DI)(R11*4)
- SHRQ $8, R10
-
- MOVB R10, R11
- INCL (DI)(R11*4)
- SHRQ $8, R10
-
- INCL (DI)(R10*4)
-
- ADDQ $8, SI
- DECQ R8
- JNZ loop_hist8
-
-hist1:
- ANDQ $7, R9
- JZ end_hist
- XORQ R10, R10
-
-loop_hist1:
- MOVB (SI), R10
- INCL (DI)(R10*4)
- INCQ SI
- DECQ R9
- JNZ loop_hist1
-
-end_hist:
- RET
diff --git a/vendor/github.com/klauspost/compress/flate/crc32_noasm.go b/vendor/github.com/klauspost/compress/flate/crc32_noasm.go
deleted file mode 100644
index dcf43bd50..000000000
--- a/vendor/github.com/klauspost/compress/flate/crc32_noasm.go
+++ /dev/null
@@ -1,35 +0,0 @@
-//+build !amd64 noasm appengine gccgo
-
-// Copyright 2015, Klaus Post, see LICENSE for details.
-
-package flate
-
-func init() {
- useSSE42 = false
-}
-
-// crc32sse should never be called.
-func crc32sse(a []byte) uint32 {
- panic("no assembler")
-}
-
-// crc32sseAll should never be called.
-func crc32sseAll(a []byte, dst []uint32) {
- panic("no assembler")
-}
-
-// matchLenSSE4 should never be called.
-func matchLenSSE4(a, b []byte, max int) int {
- panic("no assembler")
- return 0
-}
-
-// histogram accumulates a histogram of b in h.
-//
-// len(h) must be >= 256, and h's elements must be all zeroes.
-func histogram(b []byte, h []int32) {
- h = h[:256]
- for _, t := range b {
- h[t]++
- }
-}
diff --git a/vendor/github.com/klauspost/compress/flate/deflate.go b/vendor/github.com/klauspost/compress/flate/deflate.go
index 628795120..20c94f596 100644
--- a/vendor/github.com/klauspost/compress/flate/deflate.go
+++ b/vendor/github.com/klauspost/compress/flate/deflate.go
@@ -50,8 +50,6 @@ const (
skipNever = math.MaxInt32
)
-var useSSE42 bool
-
type compressionLevel struct {
good, lazy, nice, chain, fastSkipHashing, level int
}
@@ -97,9 +95,8 @@ type advancedState struct {
hashOffset int
// input window: unprocessed data is window[index:windowEnd]
- index int
- bulkHasher func([]byte, []uint32)
- hashMatch [maxMatchLength + minMatchLength]uint32
+ index int
+ hashMatch [maxMatchLength + minMatchLength]uint32
}
type compressor struct {
@@ -120,7 +117,7 @@ type compressor struct {
// queued output tokens
tokens tokens
- snap fastEnc
+ fast fastEnc
state *advancedState
}
@@ -164,14 +161,14 @@ func (d *compressor) fillDeflate(b []byte) int {
return n
}
-func (d *compressor) writeBlock(tok tokens, index int, eof bool) error {
+func (d *compressor) writeBlock(tok *tokens, index int, eof bool) error {
if index > 0 || eof {
var window []byte
if d.blockStart <= index {
window = d.window[d.blockStart:index]
}
d.blockStart = index
- d.w.writeBlock(tok.tokens[:tok.n], eof, window)
+ d.w.writeBlock(tok, eof, window)
return d.w.err
}
return nil
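
The signature change from tok tokens to tok *tokens is not cosmetic: the tokens struct embeds a large fixed-size token array, so passing it by value copied the whole array on every block write. A sketch illustrating the cost; the array length here is hypothetical, not the package's actual constant.

package main

import (
	"fmt"
	"unsafe"
)

type token uint32

// tokens stands in for the package's struct; the real array length differs,
// but the point is the same: it is large.
type tokens struct {
	tokens [16384]token
	n      uint16
}

func byValue(t tokens) uint16    { return t.n } // copies the whole array per call
func byPointer(t *tokens) uint16 { return t.n } // copies one pointer

func main() {
	var t tokens
	fmt.Println(unsafe.Sizeof(t)) // bytes copied on every by-value call
	fmt.Println(byValue(t), byPointer(&t))
}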
@@ -180,20 +177,20 @@ func (d *compressor) writeBlock(tok tokens, index int, eof bool) error {
// writeBlockSkip writes the current block and uses the number of tokens
// to determine if the block should be stored on no matches, or
// only huffman encoded.
-func (d *compressor) writeBlockSkip(tok tokens, index int, eof bool) error {
+func (d *compressor) writeBlockSkip(tok *tokens, index int, eof bool) error {
if index > 0 || eof {
if d.blockStart <= index {
window := d.window[d.blockStart:index]
// If we removed less than a 64th of all literals
// we huffman compress the block.
if int(tok.n) > len(window)-int(tok.n>>6) {
- d.w.writeBlockHuff(eof, window)
+ d.w.writeBlockHuff(eof, window, d.sync)
} else {
// Write a dynamic huffman block.
- d.w.writeBlockDynamic(tok.tokens[:tok.n], eof, window)
+ d.w.writeBlockDynamic(tok, eof, window, d.sync)
}
} else {
- d.w.writeBlock(tok.tokens[:tok.n], eof, nil)
+ d.w.writeBlock(tok, eof, nil)
}
d.blockStart = index
return d.w.err
@@ -208,8 +205,16 @@ func (d *compressor) writeBlockSkip(tok tokens, index int, eof bool) error {
func (d *compressor) fillWindow(b []byte) {
// Do not fill window if we are in store-only mode,
// use constant or Snappy compression.
- switch d.compressionLevel.level {
- case 0, 1, 2:
+ if d.level == 0 {
+ return
+ }
+ if d.fast != nil {
+ // encode the last data, but discard the result
+ if len(b) > maxMatchOffset {
+ b = b[len(b)-maxMatchOffset:]
+ }
+ d.fast.Encode(&d.tokens, b)
+ d.tokens.Reset()
return
}
s := d.state
@@ -236,7 +241,7 @@ func (d *compressor) fillWindow(b []byte) {
}
dst := s.hashMatch[:dstSize]
- s.bulkHasher(tocheck, dst)
+ bulkHash4(tocheck, dst)
var newH uint32
for i, val := range dst {
di := i + startindex
@@ -284,62 +289,7 @@ func (d *compressor) findMatch(pos int, prevHead int, prevLength int, lookahead
for i := prevHead; tries > 0; tries-- {
if wEnd == win[i+length] {
- n := matchLen(win[i:], wPos, minMatchLook)
-
- if n > length && (n > minMatchLength || pos-i <= 4096) {
- length = n
- offset = pos - i
- ok = true
- if n >= nice {
- // The match is good enough that we don't try to find a better one.
- break
- }
- wEnd = win[pos+n]
- }
- }
- if i == minIndex {
- // hashPrev[i & windowMask] has already been overwritten, so stop now.
- break
- }
- i = int(d.state.hashPrev[i&windowMask]) - d.state.hashOffset
- if i < minIndex || i < 0 {
- break
- }
- }
- return
-}
-
-// Try to find a match starting at index whose length is greater than prevSize.
-// We only look at chainCount possibilities before giving up.
-// pos = s.index, prevHead = s.chainHead-s.hashOffset, prevLength=minMatchLength-1, lookahead
-func (d *compressor) findMatchSSE(pos int, prevHead int, prevLength int, lookahead int) (length, offset int, ok bool) {
- minMatchLook := maxMatchLength
- if lookahead < minMatchLook {
- minMatchLook = lookahead
- }
-
- win := d.window[0 : pos+minMatchLook]
-
- // We quit when we get a match that's at least nice long
- nice := len(win) - pos
- if d.nice < nice {
- nice = d.nice
- }
-
- // If we've got a match that's good enough, only look in 1/4 the chain.
- tries := d.chain
- length = prevLength
- if length >= d.good {
- tries >>= 2
- }
-
- wEnd := win[pos+length]
- wPos := win[pos:]
- minIndex := pos - windowSize
-
- for i := prevHead; tries > 0; tries-- {
- if wEnd == win[i+length] {
- n := matchLenSSE4(win[i:], wPos, minMatchLook)
+ n := matchLen(win[i:i+minMatchLook], wPos)
if n > length && (n > minMatchLength || pos-i <= 4096) {
length = n
@@ -372,42 +322,27 @@ func (d *compressor) writeStoredBlock(buf []byte) error {
return d.w.err
}
-const hashmul = 0x1e35a7bd
-
// hash4 returns a hash representation of the first 4 bytes
// of the supplied slice.
// The caller must ensure that len(b) >= 4.
func hash4(b []byte) uint32 {
- return ((uint32(b[3]) | uint32(b[2])<<8 | uint32(b[1])<<16 | uint32(b[0])<<24) * hashmul) >> (32 - hashBits)
+ b = b[:4]
+ return hash4u(uint32(b[3])|uint32(b[2])<<8|uint32(b[1])<<16|uint32(b[0])<<24, hashBits)
}
// bulkHash4 will compute hashes using the same
// algorithm as hash4
func bulkHash4(b []byte, dst []uint32) {
- if len(b) < minMatchLength {
+ if len(b) < 4 {
return
}
hb := uint32(b[3]) | uint32(b[2])<<8 | uint32(b[1])<<16 | uint32(b[0])<<24
- dst[0] = (hb * hashmul) >> (32 - hashBits)
- end := len(b) - minMatchLength + 1
+ dst[0] = hash4u(hb, hashBits)
+ end := len(b) - 4 + 1
for i := 1; i < end; i++ {
hb = (hb << 8) | uint32(b[i+3])
- dst[i] = (hb * hashmul) >> (32 - hashBits)
- }
-}
-
-// matchLen returns the number of matching bytes in a and b
-// up to length 'max'. Both slices must be at least 'max'
-// bytes in size.
-func matchLen(a, b []byte, max int) int {
- a = a[:max]
- b = b[:len(a)]
- for i, av := range a {
- if b[i] != av {
- return i
- }
+ dst[i] = hash4u(hb, hashBits)
}
- return max
}
func (d *compressor) initDeflate() {
@@ -424,149 +359,6 @@ func (d *compressor) initDeflate() {
s.offset = 0
s.hash = 0
s.chainHead = -1
- s.bulkHasher = bulkHash4
- if useSSE42 {
- s.bulkHasher = crc32sseAll
- }
-}
-
-// Assumes that d.fastSkipHashing != skipNever,
-// otherwise use deflateLazy
-func (d *compressor) deflate() {
- s := d.state
- // Sanity enables additional runtime tests.
- // It's intended to be used during development
- // to supplement the currently ad-hoc unit tests.
- const sanity = false
-
- if d.windowEnd-s.index < minMatchLength+maxMatchLength && !d.sync {
- return
- }
-
- s.maxInsertIndex = d.windowEnd - (minMatchLength - 1)
- if s.index < s.maxInsertIndex {
- s.hash = hash4(d.window[s.index : s.index+minMatchLength])
- }
-
- for {
- if sanity && s.index > d.windowEnd {
- panic("index > windowEnd")
- }
- lookahead := d.windowEnd - s.index
- if lookahead < minMatchLength+maxMatchLength {
- if !d.sync {
- return
- }
- if sanity && s.index > d.windowEnd {
- panic("index > windowEnd")
- }
- if lookahead == 0 {
- if d.tokens.n > 0 {
- if d.err = d.writeBlockSkip(d.tokens, s.index, false); d.err != nil {
- return
- }
- d.tokens.n = 0
- }
- return
- }
- }
- if s.index < s.maxInsertIndex {
- // Update the hash
- s.hash = hash4(d.window[s.index : s.index+minMatchLength])
- ch := s.hashHead[s.hash&hashMask]
- s.chainHead = int(ch)
- s.hashPrev[s.index&windowMask] = ch
- s.hashHead[s.hash&hashMask] = uint32(s.index + s.hashOffset)
- }
- s.length = minMatchLength - 1
- s.offset = 0
- minIndex := s.index - windowSize
- if minIndex < 0 {
- minIndex = 0
- }
-
- if s.chainHead-s.hashOffset >= minIndex && lookahead > minMatchLength-1 {
- if newLength, newOffset, ok := d.findMatch(s.index, s.chainHead-s.hashOffset, minMatchLength-1, lookahead); ok {
- s.length = newLength
- s.offset = newOffset
- }
- }
- if s.length >= minMatchLength {
- s.ii = 0
- // There was a match at the previous step, and the current match is
- // not better. Output the previous match.
- // "s.length-3" should NOT be "s.length-minMatchLength", since the format always assume 3
- d.tokens.tokens[d.tokens.n] = matchToken(uint32(s.length-3), uint32(s.offset-minOffsetSize))
- d.tokens.n++
- // Insert in the hash table all strings up to the end of the match.
- // index and index-1 are already inserted. If there is not enough
- // lookahead, the last two strings are not inserted into the hash
- // table.
- if s.length <= d.fastSkipHashing {
- var newIndex int
- newIndex = s.index + s.length
- // Calculate missing hashes
- end := newIndex
- if end > s.maxInsertIndex {
- end = s.maxInsertIndex
- }
- end += minMatchLength - 1
- startindex := s.index + 1
- if startindex > s.maxInsertIndex {
- startindex = s.maxInsertIndex
- }
- tocheck := d.window[startindex:end]
- dstSize := len(tocheck) - minMatchLength + 1
- if dstSize > 0 {
- dst := s.hashMatch[:dstSize]
- bulkHash4(tocheck, dst)
- var newH uint32
- for i, val := range dst {
- di := i + startindex
- newH = val & hashMask
- // Get previous value with the same hash.
- // Our chain should point to the previous value.
- s.hashPrev[di&windowMask] = s.hashHead[newH]
- // Set the head of the hash chain to us.
- s.hashHead[newH] = uint32(di + s.hashOffset)
- }
- s.hash = newH
- }
- s.index = newIndex
- } else {
- // For matches this long, we don't bother inserting each individual
- // item into the table.
- s.index += s.length
- if s.index < s.maxInsertIndex {
- s.hash = hash4(d.window[s.index : s.index+minMatchLength])
- }
- }
- if d.tokens.n == maxFlateBlockTokens {
- // The block includes the current character
- if d.err = d.writeBlockSkip(d.tokens, s.index, false); d.err != nil {
- return
- }
- d.tokens.n = 0
- }
- } else {
- s.ii++
- end := s.index + int(s.ii>>uint(d.fastSkipHashing)) + 1
- if end > d.windowEnd {
- end = d.windowEnd
- }
- for i := s.index; i < end; i++ {
- d.tokens.tokens[d.tokens.n] = literalToken(uint32(d.window[i]))
- d.tokens.n++
- if d.tokens.n == maxFlateBlockTokens {
- if d.err = d.writeBlockSkip(d.tokens, i+1, false); d.err != nil {
- return
- }
- d.tokens.n = 0
- }
- }
- s.index = end
- }
- }
}
// deflateLazy is the same as deflate, but with d.fastSkipHashing == skipNever,
@@ -603,15 +395,14 @@ func (d *compressor) deflateLazy() {
// Flush current output block if any.
if d.byteAvailable {
// There is still one pending token that needs to be flushed
- d.tokens.tokens[d.tokens.n] = literalToken(uint32(d.window[s.index-1]))
- d.tokens.n++
+ d.tokens.AddLiteral(d.window[s.index-1])
d.byteAvailable = false
}
if d.tokens.n > 0 {
- if d.err = d.writeBlock(d.tokens, s.index, false); d.err != nil {
+ if d.err = d.writeBlock(&d.tokens, s.index, false); d.err != nil {
return
}
- d.tokens.n = 0
+ d.tokens.Reset()
}
return
}
@@ -642,8 +433,7 @@ func (d *compressor) deflateLazy() {
if prevLength >= minMatchLength && s.length <= prevLength {
// There was a match at the previous step, and the current match is
// not better. Output the previous match.
- d.tokens.tokens[d.tokens.n] = matchToken(uint32(prevLength-3), uint32(prevOffset-minOffsetSize))
- d.tokens.n++
+ d.tokens.AddMatch(uint32(prevLength-3), uint32(prevOffset-minOffsetSize))
// Insert in the hash table all strings up to the end of the match.
// index and index-1 are already inserted. If there is not enough
@@ -684,10 +474,10 @@ func (d *compressor) deflateLazy() {
s.length = minMatchLength - 1
if d.tokens.n == maxFlateBlockTokens {
// The block includes the current character
- if d.err = d.writeBlock(d.tokens, s.index, false); d.err != nil {
+ if d.err = d.writeBlock(&d.tokens, s.index, false); d.err != nil {
return
}
- d.tokens.n = 0
+ d.tokens.Reset()
}
} else {
// Reset, if we got a match this run.
@@ -697,13 +487,12 @@ func (d *compressor) deflateLazy() {
// We have a byte waiting. Emit it.
if d.byteAvailable {
s.ii++
- d.tokens.tokens[d.tokens.n] = literalToken(uint32(d.window[s.index-1]))
- d.tokens.n++
+ d.tokens.AddLiteral(d.window[s.index-1])
if d.tokens.n == maxFlateBlockTokens {
- if d.err = d.writeBlock(d.tokens, s.index, false); d.err != nil {
+ if d.err = d.writeBlock(&d.tokens, s.index, false); d.err != nil {
return
}
- d.tokens.n = 0
+ d.tokens.Reset()
}
s.index++
@@ -716,343 +505,24 @@ func (d *compressor) deflateLazy() {
break
}
- d.tokens.tokens[d.tokens.n] = literalToken(uint32(d.window[s.index-1]))
- d.tokens.n++
+ d.tokens.AddLiteral(d.window[s.index-1])
if d.tokens.n == maxFlateBlockTokens {
- if d.err = d.writeBlock(d.tokens, s.index, false); d.err != nil {
+ if d.err = d.writeBlock(&d.tokens, s.index, false); d.err != nil {
return
}
- d.tokens.n = 0
+ d.tokens.Reset()
}
s.index++
}
// Flush last byte
- d.tokens.tokens[d.tokens.n] = literalToken(uint32(d.window[s.index-1]))
- d.tokens.n++
+ d.tokens.AddLiteral(d.window[s.index-1])
d.byteAvailable = false
// s.length = minMatchLength - 1 // not needed, since s.ii is reset above, so it should never be > minMatchLength
if d.tokens.n == maxFlateBlockTokens {
- if d.err = d.writeBlock(d.tokens, s.index, false); d.err != nil {
+ if d.err = d.writeBlock(&d.tokens, s.index, false); d.err != nil {
return
}
- d.tokens.n = 0
- }
- }
- } else {
- s.index++
- d.byteAvailable = true
- }
- }
- }
-}
-
-// Assumes that d.fastSkipHashing != skipNever,
-// otherwise use deflateLazySSE
-func (d *compressor) deflateSSE() {
- s := d.state
- // Sanity enables additional runtime tests.
- // It's intended to be used during development
- // to supplement the currently ad-hoc unit tests.
- const sanity = false
-
- if d.windowEnd-s.index < minMatchLength+maxMatchLength && !d.sync {
- return
- }
-
- s.maxInsertIndex = d.windowEnd - (minMatchLength - 1)
- if s.index < s.maxInsertIndex {
- s.hash = crc32sse(d.window[s.index:s.index+minMatchLength]) & hashMask
- }
-
- for {
- if sanity && s.index > d.windowEnd {
- panic("index > windowEnd")
- }
- lookahead := d.windowEnd - s.index
- if lookahead < minMatchLength+maxMatchLength {
- if !d.sync {
- return
- }
- if sanity && s.index > d.windowEnd {
- panic("index > windowEnd")
- }
- if lookahead == 0 {
- if d.tokens.n > 0 {
- if d.err = d.writeBlockSkip(d.tokens, s.index, false); d.err != nil {
- return
- }
- d.tokens.n = 0
- }
- return
- }
- }
- if s.index < s.maxInsertIndex {
- // Update the hash
- s.hash = crc32sse(d.window[s.index:s.index+minMatchLength]) & hashMask
- ch := s.hashHead[s.hash]
- s.chainHead = int(ch)
- s.hashPrev[s.index&windowMask] = ch
- s.hashHead[s.hash] = uint32(s.index + s.hashOffset)
- }
- s.length = minMatchLength - 1
- s.offset = 0
- minIndex := s.index - windowSize
- if minIndex < 0 {
- minIndex = 0
- }
-
- if s.chainHead-s.hashOffset >= minIndex && lookahead > minMatchLength-1 {
- if newLength, newOffset, ok := d.findMatchSSE(s.index, s.chainHead-s.hashOffset, minMatchLength-1, lookahead); ok {
- s.length = newLength
- s.offset = newOffset
- }
- }
- if s.length >= minMatchLength {
- s.ii = 0
- // There was a match at the previous step, and the current match is
- // not better. Output the previous match.
- // "s.length-3" should NOT be "s.length-minMatchLength", since the format always assume 3
- d.tokens.tokens[d.tokens.n] = matchToken(uint32(s.length-3), uint32(s.offset-minOffsetSize))
- d.tokens.n++
- // Insert in the hash table all strings up to the end of the match.
- // index and index-1 are already inserted. If there is not enough
- // lookahead, the last two strings are not inserted into the hash
- // table.
- if s.length <= d.fastSkipHashing {
- var newIndex int
- newIndex = s.index + s.length
- // Calculate missing hashes
- end := newIndex
- if end > s.maxInsertIndex {
- end = s.maxInsertIndex
- }
- end += minMatchLength - 1
- startindex := s.index + 1
- if startindex > s.maxInsertIndex {
- startindex = s.maxInsertIndex
- }
- tocheck := d.window[startindex:end]
- dstSize := len(tocheck) - minMatchLength + 1
- if dstSize > 0 {
- dst := s.hashMatch[:dstSize]
-
- crc32sseAll(tocheck, dst)
- var newH uint32
- for i, val := range dst {
- di := i + startindex
- newH = val & hashMask
- // Get previous value with the same hash.
- // Our chain should point to the previous value.
- s.hashPrev[di&windowMask] = s.hashHead[newH]
- // Set the head of the hash chain to us.
- s.hashHead[newH] = uint32(di + s.hashOffset)
- }
- s.hash = newH
- }
- s.index = newIndex
- } else {
- // For matches this long, we don't bother inserting each individual
- // item into the table.
- s.index += s.length
- if s.index < s.maxInsertIndex {
- s.hash = crc32sse(d.window[s.index:s.index+minMatchLength]) & hashMask
- }
- }
- if d.tokens.n == maxFlateBlockTokens {
- // The block includes the current character
- if d.err = d.writeBlockSkip(d.tokens, s.index, false); d.err != nil {
- return
- }
- d.tokens.n = 0
- }
- } else {
- s.ii++
- end := s.index + int(s.ii>>5) + 1
- if end > d.windowEnd {
- end = d.windowEnd
- }
- for i := s.index; i < end; i++ {
- d.tokens.tokens[d.tokens.n] = literalToken(uint32(d.window[i]))
- d.tokens.n++
- if d.tokens.n == maxFlateBlockTokens {
- if d.err = d.writeBlockSkip(d.tokens, i+1, false); d.err != nil {
- return
- }
- d.tokens.n = 0
- }
- }
- s.index = end
- }
- }
-}
-
-// deflateLazy is the same as deflate, but with d.fastSkipHashing == skipNever,
-// meaning it always has lazy matching on.
-func (d *compressor) deflateLazySSE() {
- s := d.state
- // Sanity enables additional runtime tests.
- // It's intended to be used during development
- // to supplement the currently ad-hoc unit tests.
- const sanity = false
-
- if d.windowEnd-s.index < minMatchLength+maxMatchLength && !d.sync {
- return
- }
-
- s.maxInsertIndex = d.windowEnd - (minMatchLength - 1)
- if s.index < s.maxInsertIndex {
- s.hash = crc32sse(d.window[s.index:s.index+minMatchLength]) & hashMask
- }
-
- for {
- if sanity && s.index > d.windowEnd {
- panic("index > windowEnd")
- }
- lookahead := d.windowEnd - s.index
- if lookahead < minMatchLength+maxMatchLength {
- if !d.sync {
- return
- }
- if sanity && s.index > d.windowEnd {
- panic("index > windowEnd")
- }
- if lookahead == 0 {
- // Flush current output block if any.
- if d.byteAvailable {
- // There is still one pending token that needs to be flushed
- d.tokens.tokens[d.tokens.n] = literalToken(uint32(d.window[s.index-1]))
- d.tokens.n++
- d.byteAvailable = false
- }
- if d.tokens.n > 0 {
- if d.err = d.writeBlock(d.tokens, s.index, false); d.err != nil {
- return
- }
- d.tokens.n = 0
- }
- return
- }
- }
- if s.index < s.maxInsertIndex {
- // Update the hash
- s.hash = crc32sse(d.window[s.index:s.index+minMatchLength]) & hashMask
- ch := s.hashHead[s.hash]
- s.chainHead = int(ch)
- s.hashPrev[s.index&windowMask] = ch
- s.hashHead[s.hash] = uint32(s.index + s.hashOffset)
- }
- prevLength := s.length
- prevOffset := s.offset
- s.length = minMatchLength - 1
- s.offset = 0
- minIndex := s.index - windowSize
- if minIndex < 0 {
- minIndex = 0
- }
-
- if s.chainHead-s.hashOffset >= minIndex && lookahead > prevLength && prevLength < d.lazy {
- if newLength, newOffset, ok := d.findMatchSSE(s.index, s.chainHead-s.hashOffset, minMatchLength-1, lookahead); ok {
- s.length = newLength
- s.offset = newOffset
- }
- }
- if prevLength >= minMatchLength && s.length <= prevLength {
- // There was a match at the previous step, and the current match is
- // not better. Output the previous match.
- d.tokens.tokens[d.tokens.n] = matchToken(uint32(prevLength-3), uint32(prevOffset-minOffsetSize))
- d.tokens.n++
-
- // Insert in the hash table all strings up to the end of the match.
- // index and index-1 are already inserted. If there is not enough
- // lookahead, the last two strings are not inserted into the hash
- // table.
- var newIndex int
- newIndex = s.index + prevLength - 1
- // Calculate missing hashes
- end := newIndex
- if end > s.maxInsertIndex {
- end = s.maxInsertIndex
- }
- end += minMatchLength - 1
- startindex := s.index + 1
- if startindex > s.maxInsertIndex {
- startindex = s.maxInsertIndex
- }
- tocheck := d.window[startindex:end]
- dstSize := len(tocheck) - minMatchLength + 1
- if dstSize > 0 {
- dst := s.hashMatch[:dstSize]
- crc32sseAll(tocheck, dst)
- var newH uint32
- for i, val := range dst {
- di := i + startindex
- newH = val & hashMask
- // Get previous value with the same hash.
- // Our chain should point to the previous value.
- s.hashPrev[di&windowMask] = s.hashHead[newH]
- // Set the head of the hash chain to us.
- s.hashHead[newH] = uint32(di + s.hashOffset)
- }
- s.hash = newH
- }
-
- s.index = newIndex
- d.byteAvailable = false
- s.length = minMatchLength - 1
- if d.tokens.n == maxFlateBlockTokens {
- // The block includes the current character
- if d.err = d.writeBlock(d.tokens, s.index, false); d.err != nil {
- return
- }
- d.tokens.n = 0
- }
- } else {
- // Reset, if we got a match this run.
- if s.length >= minMatchLength {
- s.ii = 0
- }
- // We have a byte waiting. Emit it.
- if d.byteAvailable {
- s.ii++
- d.tokens.tokens[d.tokens.n] = literalToken(uint32(d.window[s.index-1]))
- d.tokens.n++
- if d.tokens.n == maxFlateBlockTokens {
- if d.err = d.writeBlock(d.tokens, s.index, false); d.err != nil {
- return
- }
- d.tokens.n = 0
- }
- s.index++
-
- // If we have a long run of no matches, skip additional bytes
- // Resets when s.ii overflows after 64KB.
- if s.ii > 31 {
- n := int(s.ii >> 6)
- for j := 0; j < n; j++ {
- if s.index >= d.windowEnd-1 {
- break
- }
-
- d.tokens.tokens[d.tokens.n] = literalToken(uint32(d.window[s.index-1]))
- d.tokens.n++
- if d.tokens.n == maxFlateBlockTokens {
- if d.err = d.writeBlock(d.tokens, s.index, false); d.err != nil {
- return
- }
- d.tokens.n = 0
- }
- s.index++
- }
- // Flush last byte
- d.tokens.tokens[d.tokens.n] = literalToken(uint32(d.window[s.index-1]))
- d.tokens.n++
- d.byteAvailable = false
- // s.length = minMatchLength - 1 // not needed, since s.ii is reset above, so it should never be > minMatchLength
- if d.tokens.n == maxFlateBlockTokens {
- if d.err = d.writeBlock(d.tokens, s.index, false); d.err != nil {
- return
- }
- d.tokens.n = 0
+ d.tokens.Reset()
}
}
} else {
@@ -1085,17 +555,17 @@ func (d *compressor) storeHuff() {
if d.windowEnd < len(d.window) && !d.sync || d.windowEnd == 0 {
return
}
- d.w.writeBlockHuff(false, d.window[:d.windowEnd])
+ d.w.writeBlockHuff(false, d.window[:d.windowEnd], d.sync)
d.err = d.w.err
d.windowEnd = 0
}
-// storeHuff will compress and store the currently added data,
+// storeFast will compress and store the currently added data,
// if enough has been accumulated or we are at the end of the stream.
// Any error that occurred will be in d.err
-func (d *compressor) storeSnappy() {
+func (d *compressor) storeFast() {
// We only compress if we have maxStoreBlockSize.
- if d.windowEnd < maxStoreBlockSize {
+ if d.windowEnd < len(d.window) {
if !d.sync {
return
}
@@ -1106,32 +576,30 @@ func (d *compressor) storeSnappy() {
}
if d.windowEnd <= 32 {
d.err = d.writeStoredBlock(d.window[:d.windowEnd])
- d.tokens.n = 0
- d.windowEnd = 0
} else {
- d.w.writeBlockHuff(false, d.window[:d.windowEnd])
+ d.w.writeBlockHuff(false, d.window[:d.windowEnd], true)
d.err = d.w.err
}
- d.tokens.n = 0
+ d.tokens.Reset()
d.windowEnd = 0
- d.snap.Reset()
+ d.fast.Reset()
return
}
}
- d.snap.Encode(&d.tokens, d.window[:d.windowEnd])
+ d.fast.Encode(&d.tokens, d.window[:d.windowEnd])
// If we made zero matches, store the block as is.
- if int(d.tokens.n) == d.windowEnd {
+ if d.tokens.n == 0 {
d.err = d.writeStoredBlock(d.window[:d.windowEnd])
// If we removed less than 1/16th, huffman compress the block.
} else if int(d.tokens.n) > d.windowEnd-(d.windowEnd>>4) {
- d.w.writeBlockHuff(false, d.window[:d.windowEnd])
+ d.w.writeBlockHuff(false, d.window[:d.windowEnd], d.sync)
d.err = d.w.err
} else {
- d.w.writeBlockDynamic(d.tokens.tokens[:d.tokens.n], false, d.window[:d.windowEnd])
+ d.w.writeBlockDynamic(&d.tokens, false, d.window[:d.windowEnd], d.sync)
d.err = d.w.err
}
- d.tokens.n = 0
+ d.tokens.Reset()
d.windowEnd = 0
}
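
storeFast picks among three block types based on how many tokens the fast encoder produced. A compact sketch of that decision, using the same thresholds as the code above; the window size in main is hypothetical:

package main

import "fmt"

// blockType mirrors the choice in storeFast: no matches -> stored,
// too few matches removed -> huffman-only, otherwise dynamic huffman.
func blockType(tokens, windowEnd int) string {
	switch {
	case tokens == 0:
		return "stored"
	case tokens > windowEnd-(windowEnd>>4): // removed less than 1/16th
		return "huffman-only"
	default:
		return "dynamic huffman"
	}
}

func main() {
	fmt.Println(blockType(0, 65536))     // stored
	fmt.Println(blockType(64000, 65536)) // huffman-only
	fmt.Println(blockType(30000, 65536)) // dynamic huffman
}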
@@ -1176,36 +644,26 @@ func (d *compressor) init(w io.Writer, level int) (err error) {
d.fill = (*compressor).fillBlock
d.step = (*compressor).store
case level == ConstantCompression:
+ d.w.logReusePenalty = uint(4)
d.window = make([]byte, maxStoreBlockSize)
d.fill = (*compressor).fillBlock
d.step = (*compressor).storeHuff
- case level >= 1 && level <= 4:
- d.snap = newFastEnc(level)
- d.window = make([]byte, maxStoreBlockSize)
- d.fill = (*compressor).fillBlock
- d.step = (*compressor).storeSnappy
case level == DefaultCompression:
level = 5
fallthrough
- case 5 <= level && level <= 9:
+ case level >= 1 && level <= 6:
+ d.w.logReusePenalty = uint(level + 1)
+ d.fast = newFastEnc(level)
+ d.window = make([]byte, maxStoreBlockSize)
+ d.fill = (*compressor).fillBlock
+ d.step = (*compressor).storeFast
+ case 7 <= level && level <= 9:
+ d.w.logReusePenalty = uint(level)
d.state = &advancedState{}
d.compressionLevel = levels[level]
d.initDeflate()
d.fill = (*compressor).fillDeflate
- if d.fastSkipHashing == skipNever {
- if useSSE42 {
- d.step = (*compressor).deflateLazySSE
- } else {
- d.step = (*compressor).deflateLazy
- }
- } else {
- if useSSE42 {
- d.step = (*compressor).deflateSSE
- } else {
- d.step = (*compressor).deflate
-
- }
- }
+ d.step = (*compressor).deflateLazy
default:
return fmt.Errorf("flate: invalid compression level %d: want value in range [-2, 9]", level)
}
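
Illustrative sketch (not part of the diff) of the level routing above: after this change, levels 1-6 go to the table-based fast encoders and only 7-9 use the chained lazy matcher. The constant values below are assumptions about the package, not taken from the diff.

package main

import "fmt"

func strategy(level int) string {
	const (
		noCompression       = 0
		constantCompression = -2 // assumed value of ConstantCompression
		defaultCompression  = -1 // mapped to 5 in the diff
	)
	if level == defaultCompression {
		level = 5
	}
	switch {
	case level == noCompression:
		return "store"
	case level == constantCompression:
		return "storeHuff (huffman-only)"
	case level >= 1 && level <= 6:
		return "storeFast (fast table encoder)"
	case level >= 7 && level <= 9:
		return "deflateLazy (lazy matching)"
	}
	return "invalid"
}

func main() {
	for _, l := range []int{-2, -1, 1, 6, 7, 9} {
		fmt.Printf("level %d => %s\n", l, strategy(l))
	}
}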
@@ -1218,10 +676,10 @@ func (d *compressor) reset(w io.Writer) {
d.sync = false
d.err = nil
// We only need to reset a few things for Snappy.
- if d.snap != nil {
- d.snap.Reset()
+ if d.fast != nil {
+ d.fast.Reset()
d.windowEnd = 0
- d.tokens.n = 0
+ d.tokens.Reset()
return
}
switch d.compressionLevel.chain {
@@ -1240,7 +698,7 @@ func (d *compressor) reset(w io.Writer) {
s.hashOffset = 1
s.index, d.windowEnd = 0, 0
d.blockStart, d.byteAvailable = 0, false
- d.tokens.n = 0
+ d.tokens.Reset()
s.length = minMatchLength - 1
s.offset = 0
s.hash = 0
diff --git a/vendor/github.com/klauspost/compress/flate/fast_encoder.go b/vendor/github.com/klauspost/compress/flate/fast_encoder.go
new file mode 100644
index 000000000..b0a470f92
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/fast_encoder.go
@@ -0,0 +1,257 @@
+// Copyright 2011 The Snappy-Go Authors. All rights reserved.
+// Modified for deflate by Klaus Post (c) 2015.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package flate
+
+import (
+ "fmt"
+ "math/bits"
+)
+
+type fastEnc interface {
+ Encode(dst *tokens, src []byte)
+ Reset()
+}
+
+func newFastEnc(level int) fastEnc {
+ switch level {
+ case 1:
+ return &fastEncL1{fastGen: fastGen{cur: maxStoreBlockSize}}
+ case 2:
+ return &fastEncL2{fastGen: fastGen{cur: maxStoreBlockSize}}
+ case 3:
+ return &fastEncL3{fastGen: fastGen{cur: maxStoreBlockSize}}
+ case 4:
+ return &fastEncL4{fastGen: fastGen{cur: maxStoreBlockSize}}
+ case 5:
+ return &fastEncL5{fastGen: fastGen{cur: maxStoreBlockSize}}
+ case 6:
+ return &fastEncL6{fastGen: fastGen{cur: maxStoreBlockSize}}
+ default:
+ panic("invalid level specified")
+ }
+}
+
+const (
+ tableBits = 16 // Bits used in the table
+ tableSize = 1 << tableBits // Size of the table
+ tableShift = 32 - tableBits // Right-shift to get the tableBits most significant bits of a uint32.
+ baseMatchOffset = 1 // The smallest match offset
+ baseMatchLength = 3 // The smallest match length per the RFC section 3.2.5
+ maxMatchOffset = 1 << 15 // The largest match offset
+
+ bTableBits = 18 // Bits used in the big tables
+ bTableSize = 1 << bTableBits // Size of the table
+ allocHistory = maxMatchOffset * 10 // Size to preallocate for history.
+ bufferReset = (1 << 31) - allocHistory - maxStoreBlockSize // Reset the buffer offset when reaching this.
+)
+
+const (
+ prime3bytes = 506832829
+ prime4bytes = 2654435761
+ prime5bytes = 889523592379
+ prime6bytes = 227718039650203
+ prime7bytes = 58295818150454627
+ prime8bytes = 0xcf1bbcdcb7a56463
+)
+
+func load32(b []byte, i int) uint32 {
+ // Help the compiler eliminate bounds checks on the read so it can be done in a single read.
+ b = b[i:]
+ b = b[:4]
+ return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
+}
+
+func load64(b []byte, i int) uint64 {
+ // Help the compiler eliminate bounds checks on the read so it can be done in a single read.
+ b = b[i:]
+ b = b[:8]
+ return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 |
+ uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
+}
+
+func load3232(b []byte, i int32) uint32 {
+ // Help the compiler eliminate bounds checks on the read so it can be done in a single read.
+ b = b[i:]
+ b = b[:4]
+ return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
+}
+
+func load6432(b []byte, i int32) uint64 {
+ // Help the compiler eliminate bounds checks on the read so it can be done in a single read.
+ b = b[i:]
+ b = b[:8]
+ return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 |
+ uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
+}
+
+func hash(u uint32) uint32 {
+ return (u * 0x1e35a7bd) >> tableShift
+}
+
+type tableEntry struct {
+ val uint32
+ offset int32
+}
+
+// fastGen maintains the table for matches,
+// and the previous byte block for level 2.
+// This is the generic implementation.
+type fastGen struct {
+ hist []byte
+ cur int32
+}
+
+func (e *fastGen) addBlock(src []byte) int32 {
+ // check if we have space already
+ if len(e.hist)+len(src) > cap(e.hist) {
+ if cap(e.hist) == 0 {
+ e.hist = make([]byte, 0, allocHistory)
+ } else {
+ if cap(e.hist) < maxMatchOffset*2 {
+ panic("unexpected buffer size")
+ }
+ // Move down
+ offset := int32(len(e.hist)) - maxMatchOffset
+ copy(e.hist[0:maxMatchOffset], e.hist[offset:])
+ e.cur += offset
+ e.hist = e.hist[:maxMatchOffset]
+ }
+ }
+ s := int32(len(e.hist))
+ e.hist = append(e.hist, src...)
+ return s
+}
+
+// hash4 returns the hash of u to fit in a hash table with h bits.
+// Preferably h should be a constant and should always be <32.
+func hash4u(u uint32, h uint8) uint32 {
+ return (u * prime4bytes) >> ((32 - h) & 31)
+}
+
+type tableEntryPrev struct {
+ Cur tableEntry
+ Prev tableEntry
+}
+
+// hash4x64 returns the hash of the lowest 4 bytes of u to fit in a hash table with h bits.
+// Preferably h should be a constant and should always be <32.
+func hash4x64(u uint64, h uint8) uint32 {
+ return (uint32(u) * prime4bytes) >> ((32 - h) & 31)
+}
+
+// hash7 returns the hash of the lowest 7 bytes of u to fit in a hash table with h bits.
+// Preferably h should be a constant and should always be <64.
+func hash7(u uint64, h uint8) uint32 {
+ return uint32(((u << (64 - 56)) * prime7bytes) >> ((64 - h) & 63))
+}
+
+// hash8 returns the hash of u to fit in a hash table with h bits.
+// Preferably h should be a constant and should always be <64.
+func hash8(u uint64, h uint8) uint32 {
+ return uint32((u * prime8bytes) >> ((64 - h) & 63))
+}
+
+// hash6 returns the hash of the lowest 6 bytes of u to fit in a hash table with h bits.
+// Preferably h should be a constant and should always be <64.
+func hash6(u uint64, h uint8) uint32 {
+ return uint32(((u << (64 - 48)) * prime6bytes) >> ((64 - h) & 63))
+}
+
+// matchlen will return the match length between offsets s and t in src.
+// The maximum length returned is maxMatchLength - 4.
+// It is assumed that s > t, that t >= 0 and that s < len(src).
+func (e *fastGen) matchlen(s, t int32, src []byte) int32 {
+ if debugDecode {
+ if t >= s {
+ panic(fmt.Sprint("t >=s:", t, s))
+ }
+ if int(s) >= len(src) {
+ panic(fmt.Sprint("s >= len(src):", s, len(src)))
+ }
+ if t < 0 {
+ panic(fmt.Sprint("t < 0:", t))
+ }
+ if s-t > maxMatchOffset {
+ panic(fmt.Sprint(s, "-", t, "(", s-t, ") > maxMatchLength (", maxMatchOffset, ")"))
+ }
+ }
+ s1 := int(s) + maxMatchLength - 4
+ if s1 > len(src) {
+ s1 = len(src)
+ }
+
+ // Extend the match to be as long as possible.
+ return int32(matchLen(src[s:s1], src[t:]))
+}
+
+// matchlenLong will return the match length between offsets s and t in src.
+// It is assumed that s > t, that t >= 0 and that s < len(src).
+func (e *fastGen) matchlenLong(s, t int32, src []byte) int32 {
+ if debugDecode {
+ if t >= s {
+ panic(fmt.Sprint("t >=s:", t, s))
+ }
+ if int(s) >= len(src) {
+ panic(fmt.Sprint("s >= len(src):", s, len(src)))
+ }
+ if t < 0 {
+ panic(fmt.Sprint("t < 0:", t))
+ }
+ if s-t > maxMatchOffset {
+ panic(fmt.Sprint(s, "-", t, "(", s-t, ") > maxMatchLength (", maxMatchOffset, ")"))
+ }
+ }
+ // Extend the match to be as long as possible.
+ return int32(matchLen(src[s:], src[t:]))
+}
+
+// Reset the encoding table.
+func (e *fastGen) Reset() {
+ if cap(e.hist) < int(maxMatchOffset*8) {
+ l := maxMatchOffset * 8
+ // Make it at least 1MB.
+ if l < 1<<20 {
+ l = 1 << 20
+ }
+ e.hist = make([]byte, 0, l)
+ }
+ // We offset current position so everything will be out of reach
+ e.cur += maxMatchOffset + int32(len(e.hist))
+ e.hist = e.hist[:0]
+}
+
+// matchLen returns the number of leading bytes of a and b that match.
+// 'a' must be the shorter of the two slices.
+func matchLen(a, b []byte) int {
+ b = b[:len(a)]
+ var checked int
+ if len(a) > 4 {
+ // Try 4 bytes first
+ if diff := load32(a, 0) ^ load32(b, 0); diff != 0 {
+ return bits.TrailingZeros32(diff) >> 3
+ }
+ // Switch to 8 byte matching.
+ checked = 4
+ a = a[4:]
+ b = b[4:]
+ for len(a) >= 8 {
+ b = b[:len(a)]
+ if diff := load64(a, 0) ^ load64(b, 0); diff != 0 {
+ return checked + (bits.TrailingZeros64(diff) >> 3)
+ }
+ checked += 8
+ a = a[8:]
+ b = b[8:]
+ }
+ }
+ b = b[:len(a)]
+ for i := range a {
+ if a[i] != b[i] {
+ return int(i) + checked
+ }
+ }
+ return len(a) + checked
+}
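
matchLen above compares eight bytes at a time: XOR-ing two equal-length words leaves zero bits wherever the bytes agree, so the trailing-zero count of the difference, divided by 8, is the index of the first mismatching byte on a little-endian machine. A tiny standalone demonstration of the trick (not from the diff):

package main

import (
	"encoding/binary"
	"fmt"
	"math/bits"
)

func main() {
	a := []byte("deflate!")
	b := []byte("defzate!")
	// Equal bytes XOR to zero; the first set bit marks the first mismatch.
	x := binary.LittleEndian.Uint64(a) ^ binary.LittleEndian.Uint64(b)
	fmt.Println(bits.TrailingZeros64(x) >> 3) // 3: bytes 0-2 match, byte 3 differs
}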
diff --git a/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go b/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go
index f46c65418..dd74ffb87 100644
--- a/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go
+++ b/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go
@@ -85,26 +85,48 @@ type huffmanBitWriter struct {
// Data waiting to be written is bytes[0:nbytes]
// and then the low nbits of bits.
bits uint64
- nbits uint
- bytes [256]byte
- codegenFreq [codegenCodeCount]int32
+ nbits uint16
nbytes uint8
- literalFreq []int32
- offsetFreq []int32
- codegen []uint8
literalEncoding *huffmanEncoder
offsetEncoding *huffmanEncoder
codegenEncoding *huffmanEncoder
err error
+ lastHeader int
+ // Controls how eagerly Huffman tables are reused: at 0, a reused
+ // block may cost up to 2x the optimal size; higher values reuse less.
+ logReusePenalty uint
+ lastHuffMan bool
+ bytes [256]byte
+ literalFreq [lengthCodesStart + 32]uint16
+ offsetFreq [32]uint16
+ codegenFreq [codegenCodeCount]uint16
+
+ // codegen must have an extra space for the final symbol.
+ codegen [literalCount + offsetCodeCount + 1]uint8
}
+// Huffman reuse.
+//
+// The huffmanBitWriter supports reusing huffman tables and thereby combining block sections.
+//
+// This is controlled by several variables:
+//
+// If lastHeader is non-zero the Huffman table can be reused.
+// This also indicates that a Huffman table has been generated that can output all
+// possible symbols.
+// It also indicates that an EOB has not yet been emitted, so if a new table is generated
+// an EOB with the previous table must be written.
+//
+// If lastHuffMan is set, a table for outputting literals has been generated and offsets are invalid.
+//
+// An incoming block estimates the output size of a fresh table by calculating the
+// optimal size and adding a penalty in 'logReusePenalty'.
+// A Huffman table is not optimal, which is why we add a penalty, and generating a new table
+// is slower both for compression and decompression.
+
func newHuffmanBitWriter(w io.Writer) *huffmanBitWriter {
return &huffmanBitWriter{
writer: w,
- literalFreq: make([]int32, lengthCodesStart+32),
- offsetFreq: make([]int32, 32),
- codegen: make([]uint8, maxNumLit+offsetCodeCount+1),
- literalEncoding: newHuffmanEncoder(maxNumLit),
+ literalEncoding: newHuffmanEncoder(literalCount),
codegenEncoding: newHuffmanEncoder(codegenCodeCount),
offsetEncoding: newHuffmanEncoder(offsetCodeCount),
}
@@ -113,7 +135,41 @@ func newHuffmanBitWriter(w io.Writer) *huffmanBitWriter {
func (w *huffmanBitWriter) reset(writer io.Writer) {
w.writer = writer
w.bits, w.nbits, w.nbytes, w.err = 0, 0, 0, nil
- w.bytes = [256]byte{}
+ w.lastHeader = 0
+ w.lastHuffMan = false
+}
+
+func (w *huffmanBitWriter) canReuse(t *tokens) (offsets, lits bool) {
+ offsets, lits = true, true
+ a := t.offHist[:offsetCodeCount]
+ b := w.offsetFreq[:len(a)]
+ for i := range a {
+ if b[i] == 0 && a[i] != 0 {
+ offsets = false
+ break
+ }
+ }
+
+ a = t.extraHist[:literalCount-256]
+ b = w.literalFreq[256:literalCount]
+ b = b[:len(a)]
+ for i := range a {
+ if b[i] == 0 && a[i] != 0 {
+ lits = false
+ break
+ }
+ }
+ if lits {
+ a = t.litHist[:]
+ b = w.literalFreq[:len(a)]
+ for i := range a {
+ if b[i] == 0 && a[i] != 0 {
+ lits = false
+ break
+ }
+ }
+ }
+ return
}
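
canReuse above answers one question: does the existing table have a code for every symbol the incoming block needs? A reduced sketch of the same test over a single histogram; the frequency values are hypothetical:

package main

import "fmt"

func reusable(newFreq, oldFreq []uint16) bool {
	for i := range newFreq {
		if oldFreq[i] == 0 && newFreq[i] != 0 {
			return false // new block needs a symbol the old table cannot emit
		}
	}
	return true
}

func main() {
	old := []uint16{5, 0, 2, 1}
	fmt.Println(reusable([]uint16{3, 0, 1, 0}, old)) // true
	fmt.Println(reusable([]uint16{3, 7, 1, 0}, old)) // false: symbol 1 has no code
}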
func (w *huffmanBitWriter) flush() {
@@ -144,30 +200,11 @@ func (w *huffmanBitWriter) write(b []byte) {
_, w.err = w.writer.Write(b)
}
-func (w *huffmanBitWriter) writeBits(b int32, nb uint) {
- w.bits |= uint64(b) << w.nbits
+func (w *huffmanBitWriter) writeBits(b int32, nb uint16) {
+ w.bits |= uint64(b) << (w.nbits & 63)
w.nbits += nb
if w.nbits >= 48 {
- bits := w.bits
- w.bits >>= 48
- w.nbits -= 48
- n := w.nbytes
- w.bytes[n] = byte(bits)
- w.bytes[n+1] = byte(bits >> 8)
- w.bytes[n+2] = byte(bits >> 16)
- w.bytes[n+3] = byte(bits >> 24)
- w.bytes[n+4] = byte(bits >> 32)
- w.bytes[n+5] = byte(bits >> 40)
- n += 6
- if n >= bufferFlushSize {
- if w.err != nil {
- n = 0
- return
- }
- w.write(w.bytes[:n])
- n = 0
- }
- w.nbytes = n
+ w.writeOutBits()
}
}
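
writeBits now funnels its flush through writeOutBits. The underlying pattern: codes accumulate in a 64-bit word, and once at least 48 bits are pending, six whole bytes are emitted, leaving 16 bits of headroom so the next code never overflows the word. A self-contained sketch of that buffering, with the error handling and output flushing of the real writer omitted:

package main

import "fmt"

type bitWriter struct {
	bits  uint64
	nbits uint16
	out   []byte
}

func (w *bitWriter) writeBits(b uint32, nb uint16) {
	w.bits |= uint64(b) << (w.nbits & 63)
	w.nbits += nb
	if w.nbits >= 48 { // flush six whole bytes, keep the remainder
		v := w.bits
		w.bits >>= 48
		w.nbits -= 48
		w.out = append(w.out, byte(v), byte(v>>8), byte(v>>16),
			byte(v>>24), byte(v>>32), byte(v>>40))
	}
}

func main() {
	var w bitWriter
	for i := 0; i < 10; i++ {
		w.writeBits(0x1FF, 9) // ten 9-bit codes, 90 bits total
	}
	fmt.Println(len(w.out), w.nbits) // 6 42: one flush, 42 bits still pending
}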
@@ -213,7 +250,7 @@ func (w *huffmanBitWriter) generateCodegen(numLiterals int, numOffsets int, litE
// a copy of the frequencies, and as the place where we put the result.
// This is fine because the output is always shorter than the input used
// so far.
- codegen := w.codegen // cache
+ codegen := w.codegen[:] // cache
// Copy the concatenated code sizes to codegen. Put a marker at the end.
cgnl := codegen[:numLiterals]
for i := range cgnl {
@@ -292,30 +329,54 @@ func (w *huffmanBitWriter) generateCodegen(numLiterals int, numOffsets int, litE
codegen[outIndex] = badCode
}
-// dynamicSize returns the size of dynamically encoded data in bits.
-func (w *huffmanBitWriter) dynamicSize(litEnc, offEnc *huffmanEncoder, extraBits int) (size, numCodegens int) {
+func (w *huffmanBitWriter) codegens() int {
+ numCodegens := len(w.codegenFreq)
+ for numCodegens > 4 && w.codegenFreq[codegenOrder[numCodegens-1]] == 0 {
+ numCodegens--
+ }
+ return numCodegens
+}
+
+func (w *huffmanBitWriter) headerSize() (size, numCodegens int) {
numCodegens = len(w.codegenFreq)
for numCodegens > 4 && w.codegenFreq[codegenOrder[numCodegens-1]] == 0 {
numCodegens--
}
- header := 3 + 5 + 5 + 4 + (3 * numCodegens) +
+ return 3 + 5 + 5 + 4 + (3 * numCodegens) +
w.codegenEncoding.bitLength(w.codegenFreq[:]) +
int(w.codegenFreq[16])*2 +
int(w.codegenFreq[17])*3 +
- int(w.codegenFreq[18])*7
+ int(w.codegenFreq[18])*7, numCodegens
+}
+
+// dynamicSize returns the size of dynamically encoded data in bits.
+func (w *huffmanBitWriter) dynamicSize(litEnc, offEnc *huffmanEncoder, extraBits int) (size, numCodegens int) {
+ header, numCodegens := w.headerSize()
size = header +
- litEnc.bitLength(w.literalFreq) +
- offEnc.bitLength(w.offsetFreq) +
+ litEnc.bitLength(w.literalFreq[:]) +
+ offEnc.bitLength(w.offsetFreq[:]) +
extraBits
-
return size, numCodegens
}
+// extraBitSize will return the number of bits that will be written
+// as "extra" bits on matches.
+func (w *huffmanBitWriter) extraBitSize() int {
+ total := 0
+ for i, n := range w.literalFreq[257:literalCount] {
+ total += int(n) * int(lengthExtraBits[i&31])
+ }
+ for i, n := range w.offsetFreq[:offsetCodeCount] {
+ total += int(n) * int(offsetExtraBits[i&31])
+ }
+ return total
+}
+
// fixedSize returns the size of fixed Huffman encoded data in bits.
func (w *huffmanBitWriter) fixedSize(extraBits int) int {
return 3 +
- fixedLiteralEncoding.bitLength(w.literalFreq) +
- fixedOffsetEncoding.bitLength(w.offsetFreq) +
+ fixedLiteralEncoding.bitLength(w.literalFreq[:]) +
+ fixedOffsetEncoding.bitLength(w.offsetFreq[:]) +
extraBits
}
@@ -333,30 +394,36 @@ func (w *huffmanBitWriter) storedSize(in []byte) (int, bool) {
}
func (w *huffmanBitWriter) writeCode(c hcode) {
+ // The function does not get inlined if we "& 63" the shift.
w.bits |= uint64(c.code) << w.nbits
- w.nbits += uint(c.len)
+ w.nbits += c.len
if w.nbits >= 48 {
- bits := w.bits
- w.bits >>= 48
- w.nbits -= 48
- n := w.nbytes
- w.bytes[n] = byte(bits)
- w.bytes[n+1] = byte(bits >> 8)
- w.bytes[n+2] = byte(bits >> 16)
- w.bytes[n+3] = byte(bits >> 24)
- w.bytes[n+4] = byte(bits >> 32)
- w.bytes[n+5] = byte(bits >> 40)
- n += 6
- if n >= bufferFlushSize {
- if w.err != nil {
- n = 0
- return
- }
- w.write(w.bytes[:n])
+ w.writeOutBits()
+ }
+}
+
+// writeOutBits will write 48 buffered bits (6 bytes) into the byte buffer.
+func (w *huffmanBitWriter) writeOutBits() {
+ bits := w.bits
+ w.bits >>= 48
+ w.nbits -= 48
+ n := w.nbytes
+ w.bytes[n] = byte(bits)
+ w.bytes[n+1] = byte(bits >> 8)
+ w.bytes[n+2] = byte(bits >> 16)
+ w.bytes[n+3] = byte(bits >> 24)
+ w.bytes[n+4] = byte(bits >> 32)
+ w.bytes[n+5] = byte(bits >> 40)
+ n += 6
+ if n >= bufferFlushSize {
+ if w.err != nil {
n = 0
+ return
}
- w.nbytes = n
+ w.write(w.bytes[:n])
+ n = 0
}
+ w.nbytes = n
}
// Write the header of a dynamic Huffman block to the output stream.
@@ -395,15 +462,12 @@ func (w *huffmanBitWriter) writeDynamicHeader(numLiterals int, numOffsets int, n
case 16:
w.writeBits(int32(w.codegen[i]), 2)
i++
- break
case 17:
w.writeBits(int32(w.codegen[i]), 3)
i++
- break
case 18:
w.writeBits(int32(w.codegen[i]), 7)
i++
- break
}
}
}
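
The deleted break statements were no-ops: Go's switch does not fall through unless fallthrough is written explicitly, so each case already ends after its last statement. A minimal demonstration:

package main

import "fmt"

func main() {
	switch x := 17; {
	case x == 16:
		fmt.Println("16") // no break needed; execution stops at the case end
	case x == 17:
		fmt.Println("17") // only this line prints
	case x == 18:
		fmt.Println("18")
	}
}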
@@ -412,6 +476,11 @@ func (w *huffmanBitWriter) writeStoredHeader(length int, isEof bool) {
if w.err != nil {
return
}
+ if w.lastHeader > 0 {
+ // We owe an EOB
+ w.writeCode(w.literalEncoding.codes[endBlockMarker])
+ w.lastHeader = 0
+ }
var flag int32
if isEof {
flag = 1
@@ -426,6 +495,12 @@ func (w *huffmanBitWriter) writeFixedHeader(isEof bool) {
if w.err != nil {
return
}
+ if w.lastHeader > 0 {
+ // We owe an EOB
+ w.writeCode(w.literalEncoding.codes[endBlockMarker])
+ w.lastHeader = 0
+ }
+
// Indicate that we are a fixed Huffman block
var value int32 = 2
if isEof {
@@ -439,29 +514,23 @@ func (w *huffmanBitWriter) writeFixedHeader(isEof bool) {
// is larger than the original bytes, the data will be written as a
// stored block.
// If the input is nil, the tokens will always be Huffman encoded.
-func (w *huffmanBitWriter) writeBlock(tokens []token, eof bool, input []byte) {
+func (w *huffmanBitWriter) writeBlock(tokens *tokens, eof bool, input []byte) {
if w.err != nil {
return
}
- tokens = append(tokens, endBlockMarker)
- numLiterals, numOffsets := w.indexTokens(tokens)
-
+ tokens.AddEOB()
+ if w.lastHeader > 0 {
+ // We owe an EOB
+ w.writeCode(w.literalEncoding.codes[endBlockMarker])
+ w.lastHeader = 0
+ }
+ numLiterals, numOffsets := w.indexTokens(tokens, false)
+ w.generate(tokens)
var extraBits int
storedSize, storable := w.storedSize(input)
if storable {
- // We only bother calculating the costs of the extra bits required by
- // the length of offset fields (which will be the same for both fixed
- // and dynamic encoding), if we need to compare those two encodings
- // against stored encoding.
- for lengthCode := lengthCodesStart + 8; lengthCode < numLiterals; lengthCode++ {
- // First eight length codes have extra size = 0.
- extraBits += int(w.literalFreq[lengthCode]) * int(lengthExtraBits[lengthCode-lengthCodesStart])
- }
- for offsetCode := 4; offsetCode < numOffsets; offsetCode++ {
- // First four offset codes have extra size = 0.
- extraBits += int(w.offsetFreq[offsetCode]) * int(offsetExtraBits[offsetCode&63])
- }
+ extraBits = w.extraBitSize()
}
// Figure out smallest code.
@@ -500,7 +569,7 @@ func (w *huffmanBitWriter) writeBlock(tokens []token, eof bool, input []byte) {
}
// Write the tokens.
- w.writeTokens(tokens, literalEncoding.codes, offsetEncoding.codes)
+ w.writeTokens(tokens.Slice(), literalEncoding.codes, offsetEncoding.codes)
}
// writeBlockDynamic encodes a block using a dynamic Huffman table.
@@ -508,72 +577,103 @@ func (w *huffmanBitWriter) writeBlock(tokens []token, eof bool, input []byte) {
// histogram distribution.
// If input is supplied and the compression savings are below 1/16th of the
// input size the block is stored.
-func (w *huffmanBitWriter) writeBlockDynamic(tokens []token, eof bool, input []byte) {
+func (w *huffmanBitWriter) writeBlockDynamic(tokens *tokens, eof bool, input []byte, sync bool) {
if w.err != nil {
return
}
- tokens = append(tokens, endBlockMarker)
- numLiterals, numOffsets := w.indexTokens(tokens)
+ sync = sync || eof
+ if sync {
+ tokens.AddEOB()
+ }
- // Generate codegen and codegenFrequencies, which indicates how to encode
- // the literalEncoding and the offsetEncoding.
- w.generateCodegen(numLiterals, numOffsets, w.literalEncoding, w.offsetEncoding)
- w.codegenEncoding.generate(w.codegenFreq[:], 7)
- size, numCodegens := w.dynamicSize(w.literalEncoding, w.offsetEncoding, 0)
+ // We cannot reuse a pure huffman table.
+ if w.lastHuffMan && w.lastHeader > 0 {
+ // We will not try to reuse.
+ w.writeCode(w.literalEncoding.codes[endBlockMarker])
+ w.lastHeader = 0
+ w.lastHuffMan = false
+ }
+ if !sync {
+ tokens.Fill()
+ }
+ numLiterals, numOffsets := w.indexTokens(tokens, !sync)
- // Store bytes, if we don't get a reasonable improvement.
- if ssize, storable := w.storedSize(input); storable && ssize < (size+size>>4) {
- w.writeStoredHeader(len(input), eof)
- w.writeBytes(input)
- return
+ var size int
+ // Check if we should reuse.
+ if w.lastHeader > 0 {
+ // Estimate size for using a new table
+ newSize := w.lastHeader + tokens.EstimatedBits()
+
+ // The estimated size is calculated as an optimal table.
+ // We add a penalty to make it more realistic and re-use a bit more.
+ newSize += newSize >> (w.logReusePenalty & 31)
+ extra := w.extraBitSize()
+ reuseSize, _ := w.dynamicSize(w.literalEncoding, w.offsetEncoding, extra)
+
+ // Check if a new table is better.
+ if newSize < reuseSize {
+ // Write the EOB we owe.
+ w.writeCode(w.literalEncoding.codes[endBlockMarker])
+ size = newSize
+ w.lastHeader = 0
+ } else {
+ size = reuseSize
+ }
+ // Check if we get a reasonable size decrease.
+ if ssize, storable := w.storedSize(input); storable && ssize < (size+size>>4) {
+ w.writeStoredHeader(len(input), eof)
+ w.writeBytes(input)
+ w.lastHeader = 0
+ return
+ }
}
- // Write Huffman table.
- w.writeDynamicHeader(numLiterals, numOffsets, numCodegens, eof)
+ // We want a new block/table
+ if w.lastHeader == 0 {
+ w.generate(tokens)
+ // Generate codegen and codegenFrequencies, which indicates how to encode
+ // the literalEncoding and the offsetEncoding.
+ w.generateCodegen(numLiterals, numOffsets, w.literalEncoding, w.offsetEncoding)
+ w.codegenEncoding.generate(w.codegenFreq[:], 7)
+ var numCodegens int
+ size, numCodegens = w.dynamicSize(w.literalEncoding, w.offsetEncoding, w.extraBitSize())
+ // Store bytes, if we don't get a reasonable improvement.
+ if ssize, storable := w.storedSize(input); storable && ssize < (size+size>>4) {
+ w.writeStoredHeader(len(input), eof)
+ w.writeBytes(input)
+ w.lastHeader = 0
+ return
+ }
+
+ // Write Huffman table.
+ w.writeDynamicHeader(numLiterals, numOffsets, numCodegens, eof)
+ w.lastHeader, _ = w.headerSize()
+ w.lastHuffMan = false
+ }
+ if sync {
+ w.lastHeader = 0
+ }
// Write the tokens.
- w.writeTokens(tokens, w.literalEncoding.codes, w.offsetEncoding.codes)
+ w.writeTokens(tokens.Slice(), w.literalEncoding.codes, w.offsetEncoding.codes)
}
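
The reuse branch above weighs two estimates: encoding under the stale table (reuseSize) versus a near-optimal fresh table plus its header, inflated by logReusePenalty because the optimal estimate is optimistic. A sketch of the comparison with hypothetical bit counts:

package main

import "fmt"

// useNewTable mirrors the decision in writeBlockDynamic: generate a fresh
// table only if its penalized estimate still beats reusing the old codes.
func useNewTable(headerBits, optimalBits, reuseBits int, logReusePenalty uint) bool {
	newSize := headerBits + optimalBits
	newSize += newSize >> (logReusePenalty & 31) // penalty: estimate is optimistic
	return newSize < reuseBits
}

func main() {
	fmt.Println(useNewTable(400, 9000, 11000, 4)) // true: the stale table wastes too much
	fmt.Println(useNewTable(400, 9000, 9600, 4))  // false: keep reusing
}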
// indexTokens indexes a slice of tokens, and updates
// literalFreq and offsetFreq, and generates literalEncoding
// and offsetEncoding.
// The number of literal and offset tokens is returned.
-func (w *huffmanBitWriter) indexTokens(tokens []token) (numLiterals, numOffsets int) {
- for i := range w.literalFreq {
- w.literalFreq[i] = 0
- }
- for i := range w.offsetFreq {
- w.offsetFreq[i] = 0
- }
+func (w *huffmanBitWriter) indexTokens(t *tokens, filled bool) (numLiterals, numOffsets int) {
+ copy(w.literalFreq[:], t.litHist[:])
+ copy(w.literalFreq[256:], t.extraHist[:])
+ copy(w.offsetFreq[:], t.offHist[:offsetCodeCount])
- if len(tokens) == 0 {
+ if t.n == 0 {
return
}
-
- // Only last token should be endBlockMarker.
- if tokens[len(tokens)-1] == endBlockMarker {
- w.literalFreq[endBlockMarker]++
- tokens = tokens[:len(tokens)-1]
+ if filled {
+ return maxNumLit, maxNumDist
}
-
- // Create slices up to the next power of two to avoid bounds checks.
- lits := w.literalFreq[:256]
- offs := w.offsetFreq[:32]
- lengths := w.literalFreq[lengthCodesStart:]
- lengths = lengths[:32]
- for _, t := range tokens {
- if t < endBlockMarker {
- lits[t.literal()]++
- continue
- }
- length := t.length()
- offset := t.offset()
- lengths[lengthCode(length)&31]++
- offs[offsetCode(offset)&31]++
- }
-
// get the number of literals
numLiterals = len(w.literalFreq)
for w.literalFreq[numLiterals-1] == 0 {
@@ -590,11 +690,14 @@ func (w *huffmanBitWriter) indexTokens(tokens []token) (numLiterals, numOffsets
w.offsetFreq[0] = 1
numOffsets = 1
}
- w.literalEncoding.generate(w.literalFreq[:maxNumLit], 15)
- w.offsetEncoding.generate(w.offsetFreq[:offsetCodeCount], 15)
return
}
+func (w *huffmanBitWriter) generate(t *tokens) {
+ w.literalEncoding.generate(w.literalFreq[:literalCount], 15)
+ w.offsetEncoding.generate(w.offsetFreq[:offsetCodeCount], 15)
+}
+
// writeTokens writes a slice of tokens to the output.
// codes for literal and offset encoding must be supplied.
func (w *huffmanBitWriter) writeTokens(tokens []token, leCodes, oeCodes []hcode) {
@@ -626,8 +729,19 @@ func (w *huffmanBitWriter) writeTokens(tokens []token, leCodes, oeCodes []hcode)
// Write the length
length := t.length()
lengthCode := lengthCode(length)
- w.writeCode(lengths[lengthCode&31])
- extraLengthBits := uint(lengthExtraBits[lengthCode&31])
+ if false {
+ w.writeCode(lengths[lengthCode&31])
+ } else {
+ // inlined
+ c := lengths[lengthCode&31]
+ w.bits |= uint64(c.code) << (w.nbits & 63)
+ w.nbits += c.len
+ if w.nbits >= 48 {
+ w.writeOutBits()
+ }
+ }
+
+ extraLengthBits := uint16(lengthExtraBits[lengthCode&31])
if extraLengthBits > 0 {
extraLength := int32(length - lengthBase[lengthCode&31])
w.writeBits(extraLength, extraLengthBits)
@@ -635,8 +749,18 @@ func (w *huffmanBitWriter) writeTokens(tokens []token, leCodes, oeCodes []hcode)
// Write the offset
offset := t.offset()
offsetCode := offsetCode(offset)
- w.writeCode(offs[offsetCode&31])
- extraOffsetBits := uint(offsetExtraBits[offsetCode&63])
+ if false {
+ w.writeCode(offs[offsetCode&31])
+ } else {
+ // inlined
+ c := offs[offsetCode&31]
+ w.bits |= uint64(c.code) << (w.nbits & 63)
+ w.nbits += c.len
+ if w.nbits >= 48 {
+ w.writeOutBits()
+ }
+ }
+ extraOffsetBits := uint16(offsetExtraBits[offsetCode&63])
if extraOffsetBits > 0 {
extraOffset := int32(offset - offsetBase[offsetCode&63])
w.writeBits(extraOffset, extraOffsetBits)
@@ -661,75 +785,93 @@ func init() {
// writeBlockHuff encodes a block of bytes as either
// Huffman encoded literals or uncompressed bytes if the
// results only gains very little from compression.
-func (w *huffmanBitWriter) writeBlockHuff(eof bool, input []byte) {
+func (w *huffmanBitWriter) writeBlockHuff(eof bool, input []byte, sync bool) {
if w.err != nil {
return
}
// Clear histogram
- for i := range w.literalFreq {
+ for i := range w.literalFreq[:] {
w.literalFreq[i] = 0
}
+ if !w.lastHuffMan {
+ for i := range w.offsetFreq[:] {
+ w.offsetFreq[i] = 0
+ }
+ }
// Add everything as literals
- histogram(input, w.literalFreq)
+ estBits := histogramSize(input, w.literalFreq[:], !eof && !sync) + 15
- w.literalFreq[endBlockMarker] = 1
+ // Store bytes, if we don't get a reasonable improvement.
+ ssize, storable := w.storedSize(input)
+ if storable && ssize < (estBits+estBits>>4) {
+ w.writeStoredHeader(len(input), eof)
+ w.writeBytes(input)
+ return
+ }
- const numLiterals = endBlockMarker + 1
- const numOffsets = 1
+ if w.lastHeader > 0 {
+ size, _ := w.dynamicSize(w.literalEncoding, huffOffset, w.lastHeader)
+ estBits += estBits >> (w.logReusePenalty)
- w.literalEncoding.generate(w.literalFreq[:maxNumLit], 15)
+ if estBits < size {
+ // We owe an EOB
+ w.writeCode(w.literalEncoding.codes[endBlockMarker])
+ w.lastHeader = 0
+ }
+ }
- // Figure out smallest code.
- // Always use dynamic Huffman or Store
- var numCodegens int
+ const numLiterals = endBlockMarker + 1
+ const numOffsets = 1
+ if w.lastHeader == 0 {
+ w.literalFreq[endBlockMarker] = 1
+ w.literalEncoding.generate(w.literalFreq[:numLiterals], 15)
- // Generate codegen and codegenFrequencies, which indicates how to encode
- // the literalEncoding and the offsetEncoding.
- w.generateCodegen(numLiterals, numOffsets, w.literalEncoding, huffOffset)
- w.codegenEncoding.generate(w.codegenFreq[:], 7)
- size, numCodegens := w.dynamicSize(w.literalEncoding, huffOffset, 0)
+ // Generate codegen and codegenFrequencies, which indicates how to encode
+ // the literalEncoding and the offsetEncoding.
+ w.generateCodegen(numLiterals, numOffsets, w.literalEncoding, huffOffset)
+ w.codegenEncoding.generate(w.codegenFreq[:], 7)
+ numCodegens := w.codegens()
- // Store bytes, if we don't get a reasonable improvement.
- if ssize, storable := w.storedSize(input); storable && ssize < (size+size>>4) {
- w.writeStoredHeader(len(input), eof)
- w.writeBytes(input)
- return
+ // Huffman.
+ w.writeDynamicHeader(numLiterals, numOffsets, numCodegens, eof)
+ w.lastHuffMan = true
+ w.lastHeader, _ = w.headerSize()
}
- // Huffman.
- w.writeDynamicHeader(numLiterals, numOffsets, numCodegens, eof)
encoding := w.literalEncoding.codes[:257]
- n := w.nbytes
for _, t := range input {
// Bitwriting inlined, ~30% speedup
c := encoding[t]
- w.bits |= uint64(c.code) << w.nbits
- w.nbits += uint(c.len)
- if w.nbits < 48 {
- continue
- }
- // Store 6 bytes
- bits := w.bits
- w.bits >>= 48
- w.nbits -= 48
- w.bytes[n] = byte(bits)
- w.bytes[n+1] = byte(bits >> 8)
- w.bytes[n+2] = byte(bits >> 16)
- w.bytes[n+3] = byte(bits >> 24)
- w.bytes[n+4] = byte(bits >> 32)
- w.bytes[n+5] = byte(bits >> 40)
- n += 6
- if n < bufferFlushSize {
- continue
- }
- w.write(w.bytes[:n])
- if w.err != nil {
- return // Return early in the event of write failures
+ w.bits |= uint64(c.code) << ((w.nbits) & 63)
+ w.nbits += c.len
+ if w.nbits >= 48 {
+ bits := w.bits
+ w.bits >>= 48
+ w.nbits -= 48
+ n := w.nbytes
+ w.bytes[n] = byte(bits)
+ w.bytes[n+1] = byte(bits >> 8)
+ w.bytes[n+2] = byte(bits >> 16)
+ w.bytes[n+3] = byte(bits >> 24)
+ w.bytes[n+4] = byte(bits >> 32)
+ w.bytes[n+5] = byte(bits >> 40)
+ n += 6
+ if n >= bufferFlushSize {
+ if w.err != nil {
+ n = 0
+ return
+ }
+ w.write(w.bytes[:n])
+ n = 0
+ }
+ w.nbytes = n
}
- n = 0
}
- w.nbytes = n
- w.writeCode(encoding[endBlockMarker])
+ if eof || sync {
+ w.writeCode(encoding[endBlockMarker])
+ w.lastHeader = 0
+ w.lastHuffMan = false
+ }
}
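
The reworked writeBlockHuff above decides between a stored block and Huffman coding from a Shannon-entropy estimate rather than a full code build: if storing the raw bytes beats the estimate by more than roughly 1/16 (the `ssize < (estBits+estBits>>4)` test), the block is written uncompressed. A minimal sketch of that decision, where shannonBits and chooseStored are illustrative helpers that ignore header and end-of-block costs:

package main

import (
	"bytes"
	"fmt"
	"math"
)

// shannonBits estimates the coded size of b in bits from its byte
// histogram, playing the role of histogramSize in the diff above.
func shannonBits(b []byte) int {
	if len(b) == 0 {
		return 0
	}
	var hist [256]int
	for _, c := range b {
		hist[c]++
	}
	inv := 1.0 / float64(len(b))
	total := 0.0
	for _, n := range hist {
		if n > 0 {
			total += -math.Log2(float64(n)*inv) * float64(n)
		}
	}
	return int(total + 0.99)
}

// chooseStored reports whether a stored (uncompressed) block is
// preferable: it must beat the entropy estimate plus a ~6% margin,
// mirroring the ssize < (estBits+estBits>>4) comparison.
func chooseStored(b []byte) bool {
	stored := (len(b) + 5) * 8 // 5-byte stored-block header, then raw bytes
	est := shannonBits(b)
	return stored < est+est>>4
}

func main() {
	random := make([]byte, 4096)
	for i := range random {
		random[i] = byte(uint32(i) * 2654435761 >> 24) // cheap pseudo-random fill
	}
	fmt.Println(chooseStored(random))                          // true: store raw
	fmt.Println(chooseStored(bytes.Repeat([]byte{'a'}, 4096))) // false: Huffman wins
}
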
diff --git a/vendor/github.com/klauspost/compress/flate/huffman_code.go b/vendor/github.com/klauspost/compress/flate/huffman_code.go
index f65f79336..1810c6898 100644
--- a/vendor/github.com/klauspost/compress/flate/huffman_code.go
+++ b/vendor/github.com/klauspost/compress/flate/huffman_code.go
@@ -10,6 +10,12 @@ import (
"sort"
)
+const (
+ maxBitsLimit = 16
+ // number of valid literals
+ literalCount = 286
+)
+
// hcode is a huffman code with a bit code and bit length.
type hcode struct {
code, len uint16
@@ -25,7 +31,7 @@ type huffmanEncoder struct {
type literalNode struct {
literal uint16
- freq int32
+ freq uint16
}
// A levelInfo describes the state of the constructed tree for a given depth.
@@ -54,7 +60,11 @@ func (h *hcode) set(code uint16, length uint16) {
h.code = code
}
-func maxNode() literalNode { return literalNode{math.MaxUint16, math.MaxInt32} }
+func reverseBits(number uint16, bitLength byte) uint16 {
+ return bits.Reverse16(number << ((16 - bitLength) & 15))
+}
+
+func maxNode() literalNode { return literalNode{math.MaxUint16, math.MaxUint16} }
func newHuffmanEncoder(size int) *huffmanEncoder {
// Make capacity to next power of two.
@@ -64,10 +74,10 @@ func newHuffmanEncoder(size int) *huffmanEncoder {
// Generates a HuffmanCode corresponding to the fixed literal table
func generateFixedLiteralEncoding() *huffmanEncoder {
- h := newHuffmanEncoder(maxNumLit)
+ h := newHuffmanEncoder(literalCount)
codes := h.codes
var ch uint16
- for ch = 0; ch < maxNumLit; ch++ {
+ for ch = 0; ch < literalCount; ch++ {
var bits uint16
var size uint16
switch {
@@ -75,17 +85,14 @@ func generateFixedLiteralEncoding() *huffmanEncoder {
// size 8, 000110000 .. 10111111
bits = ch + 48
size = 8
- break
case ch < 256:
// size 9, 110010000 .. 111111111
bits = ch + 400 - 144
size = 9
- break
case ch < 280:
// size 7, 0000000 .. 0010111
bits = ch - 256
size = 7
- break
default:
// size 8, 11000000 .. 11000111
bits = ch + 192 - 280
@@ -108,7 +115,7 @@ func generateFixedOffsetEncoding() *huffmanEncoder {
var fixedLiteralEncoding *huffmanEncoder = generateFixedLiteralEncoding()
var fixedOffsetEncoding *huffmanEncoder = generateFixedOffsetEncoding()
-func (h *huffmanEncoder) bitLength(freq []int32) int {
+func (h *huffmanEncoder) bitLength(freq []uint16) int {
var total int
for i, f := range freq {
if f != 0 {
@@ -118,8 +125,6 @@ func (h *huffmanEncoder) bitLength(freq []int32) int {
return total
}
-const maxBitsLimit = 16
-
// Return the number of literals assigned to each bit size in the Huffman encoding
//
// This method is only called when list.length >= 3
@@ -163,9 +168,9 @@ func (h *huffmanEncoder) bitCounts(list []literalNode, maxBits int32) []int32 {
// We initialize the levels as if we had already figured this out.
levels[level] = levelInfo{
level: level,
- lastFreq: list[1].freq,
- nextCharFreq: list[2].freq,
- nextPairFreq: list[0].freq + list[1].freq,
+ lastFreq: int32(list[1].freq),
+ nextCharFreq: int32(list[2].freq),
+ nextPairFreq: int32(list[0].freq) + int32(list[1].freq),
}
leafCounts[level][level] = 2
if level == 1 {
@@ -197,7 +202,12 @@ func (h *huffmanEncoder) bitCounts(list []literalNode, maxBits int32) []int32 {
l.lastFreq = l.nextCharFreq
// Lower leafCounts are the same of the previous node.
leafCounts[level][level] = n
- l.nextCharFreq = list[n].freq
+ e := list[n]
+ if e.literal < math.MaxUint16 {
+ l.nextCharFreq = int32(e.freq)
+ } else {
+ l.nextCharFreq = math.MaxInt32
+ }
} else {
// The next item on this row is a pair from the previous row.
// nextPairFreq isn't valid until we generate two
@@ -273,12 +283,12 @@ func (h *huffmanEncoder) assignEncodingAndSize(bitCount []int32, list []literalN
//
// freq An array of frequencies, in which frequency[i] gives the frequency of literal i.
// maxBits The maximum number of bits to use for any literal.
-func (h *huffmanEncoder) generate(freq []int32, maxBits int32) {
+func (h *huffmanEncoder) generate(freq []uint16, maxBits int32) {
if h.freqcache == nil {
// Allocate a reusable buffer with the longest possible frequency table.
- // Possible lengths are codegenCodeCount, offsetCodeCount and maxNumLit.
- // The largest of these is maxNumLit, so we allocate for that case.
- h.freqcache = make([]literalNode, maxNumLit+1)
+ // Possible lengths are codegenCodeCount, offsetCodeCount and literalCount.
+ // The largest of these is literalCount, so we allocate for that case.
+ h.freqcache = make([]literalNode, literalCount+1)
}
list := h.freqcache[:len(freq)+1]
// Number of non-zero literals
@@ -345,3 +355,27 @@ func (s byFreq) Less(i, j int) bool {
}
func (s byFreq) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
+
+// histogramSize accumulates a histogram of b in h.
+// An estimated size in bits is returned.
+// Unassigned values are assigned '1' in the histogram.
+// len(h) must be >= 256, and h's elements must be all zeroes.
+func histogramSize(b []byte, h []uint16, fill bool) int {
+ h = h[:256]
+ for _, t := range b {
+ h[t]++
+ }
+ invTotal := 1.0 / float64(len(b))
+ shannon := 0.0
+ single := math.Ceil(-math.Log2(invTotal))
+ for i, v := range h[:] {
+ if v > 0 {
+ n := float64(v)
+ shannon += math.Ceil(-math.Log2(n*invTotal) * n)
+ } else if fill {
+ shannon += single
+ h[i] = 1
+ }
+ }
+ return int(shannon + 0.99)
+}
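
The fill parameter of histogramSize backs the header-reuse path: for blocks that are neither final nor synced, every unseen byte value is assigned a frequency of 1 so the generated Huffman code can still represent it in a later block that reuses the table. A small illustration of that padding step, with padHistogram as a hypothetical stand-in for the fill branch:

package main

import "fmt"

// padHistogram mimics the fill branch of histogramSize: symbols with
// zero frequency are bumped to 1, so a Huffman code built from the
// histogram assigns every byte value some code word and the table
// stays usable for later blocks containing bytes this one did not.
func padHistogram(h []uint16) (padded int) {
	for i, v := range h {
		if v == 0 {
			h[i] = 1
			padded++
		}
	}
	return padded
}

func main() {
	var h [256]uint16
	for _, c := range []byte("hello huffman") {
		h[c]++
	}
	fmt.Println("padded symbols:", padHistogram(h[:])) // most of the 256 values
}
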
diff --git a/vendor/github.com/klauspost/compress/flate/inflate.go b/vendor/github.com/klauspost/compress/flate/inflate.go
index 800d0ce9e..6dc5b5d06 100644
--- a/vendor/github.com/klauspost/compress/flate/inflate.go
+++ b/vendor/github.com/klauspost/compress/flate/inflate.go
@@ -9,6 +9,7 @@ package flate
import (
"bufio"
+ "fmt"
"io"
"math/bits"
"strconv"
@@ -24,6 +25,8 @@ const (
maxNumLit = 286
maxNumDist = 30
numCodes = 19 // number of codes in Huffman meta-code
+
+ debugDecode = false
)
// Initialize the fixedHuffmanDecoder only once upon first use.
@@ -104,8 +107,8 @@ const (
type huffmanDecoder struct {
min int // the minimum code length
- chunks *[huffmanNumChunks]uint32 // chunks as described above
- links [][]uint32 // overflow links
+ chunks *[huffmanNumChunks]uint16 // chunks as described above
+ links [][]uint16 // overflow links
linkMask uint32 // mask the width of the link table
}
@@ -121,7 +124,7 @@ func (h *huffmanDecoder) init(lengths []int) bool {
const sanity = false
if h.chunks == nil {
- h.chunks = &[huffmanNumChunks]uint32{}
+ h.chunks = &[huffmanNumChunks]uint16{}
}
if h.min != 0 {
*h = huffmanDecoder{chunks: h.chunks, links: h.links}
@@ -169,6 +172,9 @@ func (h *huffmanDecoder) init(lengths []int) bool {
// accept degenerate single-code codings. See also
// TestDegenerateHuffmanCoding.
if code != 1<<uint(max) && !(code == 1 && max == 1) {
+ if debugDecode {
+ fmt.Println("coding failed, code, max:", code, max, code == 1<<uint(max), code == 1 && max == 1, "(one should be true)")
+ }
return false
}
@@ -185,7 +191,7 @@ func (h *huffmanDecoder) init(lengths []int) bool {
// create link tables
link := nextcode[huffmanChunkBits+1] >> 1
if cap(h.links) < huffmanNumChunks-link {
- h.links = make([][]uint32, huffmanNumChunks-link)
+ h.links = make([][]uint16, huffmanNumChunks-link)
} else {
h.links = h.links[:huffmanNumChunks-link]
}
@@ -196,9 +202,9 @@ func (h *huffmanDecoder) init(lengths []int) bool {
if sanity && h.chunks[reverse] != 0 {
panic("impossible: overwriting existing chunk")
}
- h.chunks[reverse] = uint32(off<<huffmanValueShift | (huffmanChunkBits + 1))
+ h.chunks[reverse] = uint16(off<<huffmanValueShift | (huffmanChunkBits + 1))
if cap(h.links[off]) < numLinks {
- h.links[off] = make([]uint32, numLinks)
+ h.links[off] = make([]uint16, numLinks)
} else {
links := h.links[off][:0]
h.links[off] = links[:numLinks]
@@ -214,7 +220,7 @@ func (h *huffmanDecoder) init(lengths []int) bool {
}
code := nextcode[n]
nextcode[n]++
- chunk := uint32(i<<huffmanValueShift | n)
+ chunk := uint16(i<<huffmanValueShift | n)
reverse := int(bits.Reverse16(uint16(code)))
reverse >>= uint(16 - n)
if n <= huffmanChunkBits {
@@ -347,6 +353,9 @@ func (f *decompressor) nextBlock() {
f.huffmanBlock()
default:
// 3 is reserved.
+ if debugDecode {
+ fmt.Println("reserved data block encountered")
+ }
f.err = CorruptInputError(f.roffset)
}
}
@@ -425,11 +434,17 @@ func (f *decompressor) readHuffman() error {
}
nlit := int(f.b&0x1F) + 257
if nlit > maxNumLit {
+ if debugDecode {
+ fmt.Println("nlit > maxNumLit", nlit)
+ }
return CorruptInputError(f.roffset)
}
f.b >>= 5
ndist := int(f.b&0x1F) + 1
if ndist > maxNumDist {
+ if debugDecode {
+ fmt.Println("ndist > maxNumDist", ndist)
+ }
return CorruptInputError(f.roffset)
}
f.b >>= 5
@@ -453,6 +468,9 @@ func (f *decompressor) readHuffman() error {
f.codebits[codeOrder[i]] = 0
}
if !f.h1.init(f.codebits[0:]) {
+ if debugDecode {
+ fmt.Println("init codebits failed")
+ }
return CorruptInputError(f.roffset)
}
@@ -480,6 +498,9 @@ func (f *decompressor) readHuffman() error {
rep = 3
nb = 2
if i == 0 {
+ if debugDecode {
+ fmt.Println("i==0")
+ }
return CorruptInputError(f.roffset)
}
b = f.bits[i-1]
@@ -494,6 +515,9 @@ func (f *decompressor) readHuffman() error {
}
for f.nb < nb {
if err := f.moreBits(); err != nil {
+ if debugDecode {
+ fmt.Println("morebits:", err)
+ }
return err
}
}
@@ -501,6 +525,9 @@ func (f *decompressor) readHuffman() error {
f.b >>= nb
f.nb -= nb
if i+rep > n {
+ if debugDecode {
+ fmt.Println("i+rep > n", i, rep, n)
+ }
return CorruptInputError(f.roffset)
}
for j := 0; j < rep; j++ {
@@ -510,6 +537,9 @@ func (f *decompressor) readHuffman() error {
}
if !f.h1.init(f.bits[0:nlit]) || !f.h2.init(f.bits[nlit:nlit+ndist]) {
+ if debugDecode {
+ fmt.Println("init2 failed")
+ }
return CorruptInputError(f.roffset)
}
@@ -587,12 +617,18 @@ readLiteral:
length = 258
n = 0
default:
+ if debugDecode {
+ fmt.Println(v, ">= maxNumLit")
+ }
f.err = CorruptInputError(f.roffset)
return
}
if n > 0 {
for f.nb < n {
if err = f.moreBits(); err != nil {
+ if debugDecode {
+ fmt.Println("morebits n>0:", err)
+ }
f.err = err
return
}
@@ -606,6 +642,9 @@ readLiteral:
if f.hd == nil {
for f.nb < 5 {
if err = f.moreBits(); err != nil {
+ if debugDecode {
+ fmt.Println("morebits f.nb<5:", err)
+ }
f.err = err
return
}
@@ -615,6 +654,9 @@ readLiteral:
f.nb -= 5
} else {
if dist, err = f.huffSym(f.hd); err != nil {
+ if debugDecode {
+ fmt.Println("huffsym:", err)
+ }
f.err = err
return
}
@@ -629,6 +671,9 @@ readLiteral:
extra := (dist & 1) << nb
for f.nb < nb {
if err = f.moreBits(); err != nil {
+ if debugDecode {
+ fmt.Println("morebits f.nb<nb:", err)
+ }
f.err = err
return
}
@@ -638,12 +683,18 @@ readLiteral:
f.nb -= nb
dist = 1<<(nb+1) + 1 + extra
default:
+ if debugDecode {
+ fmt.Println("dist too big:", dist, maxNumDist)
+ }
f.err = CorruptInputError(f.roffset)
return
}
// No check on length; encoding can be prescient.
if dist > f.dict.histSize() {
+ if debugDecode {
+ fmt.Println("dist > f.dict.histSize():", dist, f.dict.histSize())
+ }
f.err = CorruptInputError(f.roffset)
return
}
@@ -688,6 +739,9 @@ func (f *decompressor) dataBlock() {
n := int(f.buf[0]) | int(f.buf[1])<<8
nn := int(f.buf[2]) | int(f.buf[3])<<8
if uint16(nn) != uint16(^n) {
+ if debugDecode {
+ fmt.Println("uint16(nn) != uint16(^n)", nn, ^n)
+ }
f.err = CorruptInputError(f.roffset)
return
}
@@ -789,6 +843,9 @@ func (f *decompressor) huffSym(h *huffmanDecoder) (int, error) {
if n == 0 {
f.b = b
f.nb = nb
+ if debugDecode {
+ fmt.Println("huffsym: n==0")
+ }
f.err = CorruptInputError(f.roffset)
return 0, f.err
}
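
The many `if debugDecode { fmt.Println(...) }` guards added to inflate.go rely on constant folding: debugDecode is a package-level constant, so with it set to false the compiler eliminates both the branch and the Println arguments, making the tracing free in normal builds. The same pattern in isolation:

package main

import "fmt"

// debug is a compile-time switch; because it is a constant, the
// compiler removes the guarded calls entirely when it is false.
const debug = false

func parseBlockType(b byte) error {
	if b == 3 {
		if debug {
			fmt.Println("reserved data block encountered:", b)
		}
		return fmt.Errorf("corrupt input: reserved block type %d", b)
	}
	return nil
}

func main() {
	fmt.Println(parseBlockType(3))
}
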
diff --git a/vendor/github.com/klauspost/compress/flate/level1.go b/vendor/github.com/klauspost/compress/flate/level1.go
new file mode 100644
index 000000000..20de8f11f
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/level1.go
@@ -0,0 +1,174 @@
+package flate
+
+// fastGen maintains the table for matches,
+// and the previous byte block for level 1.
+// This is the generic implementation.
+type fastEncL1 struct {
+ fastGen
+ table [tableSize]tableEntry
+}
+
+// Encode implements level 1 (fastest) compression using a single hash table.
+func (e *fastEncL1) Encode(dst *tokens, src []byte) {
+ const (
+ inputMargin = 12 - 1
+ minNonLiteralBlockSize = 1 + 1 + inputMargin
+ )
+
+ // Protect against e.cur wraparound.
+ for e.cur >= bufferReset {
+ if len(e.hist) == 0 {
+ for i := range e.table[:] {
+ e.table[i] = tableEntry{}
+ }
+ e.cur = maxMatchOffset
+ break
+ }
+ // Shift down everything in the table that isn't already too far away.
+ minOff := e.cur + int32(len(e.hist)) - maxMatchOffset
+ for i := range e.table[:] {
+ v := e.table[i].offset
+ if v <= minOff {
+ v = 0
+ } else {
+ v = v - e.cur + maxMatchOffset
+ }
+ e.table[i].offset = v
+ }
+ e.cur = maxMatchOffset
+ }
+
+ s := e.addBlock(src)
+
+ // This check isn't in the Snappy implementation, but there, the caller
+ // instead of the callee handles this case.
+ if len(src) < minNonLiteralBlockSize {
+ // We do not fill the token table.
+ // This will be picked up by caller.
+ dst.n = uint16(len(src))
+ return
+ }
+
+ // Override src
+ src = e.hist
+ nextEmit := s
+
+ // sLimit is when to stop looking for offset/length copies. The inputMargin
+ // lets us use a fast path for emitLiteral in the main loop, while we are
+ // looking for copies.
+ sLimit := int32(len(src) - inputMargin)
+
+ // nextEmit is where in src the next emitLiteral should start from.
+ cv := load3232(src, s)
+
+ for {
+ const skipLog = 5
+ const doEvery = 2
+
+ nextS := s
+ var candidate tableEntry
+ for {
+ nextHash := hash(cv)
+ candidate = e.table[nextHash]
+ nextS = s + doEvery + (s-nextEmit)>>skipLog
+ if nextS > sLimit {
+ goto emitRemainder
+ }
+
+ now := load6432(src, nextS)
+ e.table[nextHash] = tableEntry{offset: s + e.cur, val: cv}
+ nextHash = hash(uint32(now))
+
+ offset := s - (candidate.offset - e.cur)
+ if offset < maxMatchOffset && cv == candidate.val {
+ e.table[nextHash] = tableEntry{offset: nextS + e.cur, val: uint32(now)}
+ break
+ }
+
+ // Do one right away...
+ cv = uint32(now)
+ s = nextS
+ nextS++
+ candidate = e.table[nextHash]
+ now >>= 8
+ e.table[nextHash] = tableEntry{offset: s + e.cur, val: cv}
+
+ offset = s - (candidate.offset - e.cur)
+ if offset < maxMatchOffset && cv == candidate.val {
+ e.table[nextHash] = tableEntry{offset: nextS + e.cur, val: uint32(now)}
+ break
+ }
+ cv = uint32(now)
+ s = nextS
+ }
+
+ // A 4-byte match has been found. We'll later see if more than 4 bytes
+ // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
+ // them as literal bytes.
+ for {
+ // Invariant: we have a 4-byte match at s, and no need to emit any
+ // literal bytes prior to s.
+
+ // Extend the 4-byte match as long as possible.
+ t := candidate.offset - e.cur
+ l := e.matchlenLong(s+4, t+4, src) + 4
+
+ // Extend backwards
+ for t > 0 && s > nextEmit && src[t-1] == src[s-1] {
+ s--
+ t--
+ l++
+ }
+ if nextEmit < s {
+ emitLiteral(dst, src[nextEmit:s])
+ }
+
+ // Save the match found
+ dst.AddMatchLong(l, uint32(s-t-baseMatchOffset))
+ s += l
+ nextEmit = s
+ if nextS >= s {
+ s = nextS + 1
+ }
+ if s >= sLimit {
+ // Index first pair after match end.
+ if int(s+l+4) < len(src) {
+ cv := load3232(src, s)
+ e.table[hash(cv)] = tableEntry{offset: s + e.cur, val: cv}
+ }
+ goto emitRemainder
+ }
+
+ // We could immediately start working at s now, but to improve
+ // compression we first update the hash table at s-2 and at s. If
+ // another emitCopy is not our next move, also calculate nextHash
+ // at s+1. At least on GOARCH=amd64, these three hash calculations
+ // are faster as one load64 call (with some shifts) instead of
+ // three load32 calls.
+ x := load6432(src, s-2)
+ o := e.cur + s - 2
+ prevHash := hash(uint32(x))
+ e.table[prevHash] = tableEntry{offset: o, val: uint32(x)}
+ x >>= 16
+ currHash := hash(uint32(x))
+ candidate = e.table[currHash]
+ e.table[currHash] = tableEntry{offset: o + 2, val: uint32(x)}
+
+ offset := s - (candidate.offset - e.cur)
+ if offset > maxMatchOffset || uint32(x) != candidate.val {
+ cv = uint32(x >> 8)
+ s++
+ break
+ }
+ }
+ }
+
+emitRemainder:
+ if int(nextEmit) < len(src) {
+ // If nothing was added, don't encode literals.
+ if dst.n == 0 {
+ return
+ }
+ emitLiteral(dst, src[nextEmit:])
+ }
+}
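
The inner loop above advances its probe position with nextS = s + doEvery + (s-nextEmit)>>skipLog, so the stride grows as the run of unmatched literals grows, the same heuristic the deleted Snappy port used. A standalone sketch of how that back-off behaves with the level-1 constants:

package main

import "fmt"

func main() {
	const skipLog = 5 // constants from the level-1 encoder above
	const doEvery = 2
	nextEmit, s, last := 0, 0, 0
	// With no matches found, print each point where the stride widens:
	// every 32 unmatched bytes add one byte to the step size.
	for s < 400 {
		next := s + doEvery + (s-nextEmit)>>skipLog
		if next-s != last {
			fmt.Printf("at s=%3d the stride becomes %d\n", s, next-s)
			last = next - s
		}
		s = next
	}
}
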
diff --git a/vendor/github.com/klauspost/compress/flate/level2.go b/vendor/github.com/klauspost/compress/flate/level2.go
new file mode 100644
index 000000000..7c824431e
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/level2.go
@@ -0,0 +1,199 @@
+package flate
+
+// fastGen maintains the table for matches,
+// and the previous byte block for level 2.
+// This is the generic implementation.
+type fastEncL2 struct {
+ fastGen
+ table [bTableSize]tableEntry
+}
+
+// Encode uses a similar algorithm to level 1, but is capable
+// of matching across blocks giving better compression at a small slowdown.
+func (e *fastEncL2) Encode(dst *tokens, src []byte) {
+ const (
+ inputMargin = 12 - 1
+ minNonLiteralBlockSize = 1 + 1 + inputMargin
+ )
+
+ // Protect against e.cur wraparound.
+ for e.cur >= bufferReset {
+ if len(e.hist) == 0 {
+ for i := range e.table[:] {
+ e.table[i] = tableEntry{}
+ }
+ e.cur = maxMatchOffset
+ break
+ }
+ // Shift down everything in the table that isn't already too far away.
+ minOff := e.cur + int32(len(e.hist)) - maxMatchOffset
+ for i := range e.table[:] {
+ v := e.table[i].offset
+ if v <= minOff {
+ v = 0
+ } else {
+ v = v - e.cur + maxMatchOffset
+ }
+ e.table[i].offset = v
+ }
+ e.cur = maxMatchOffset
+ }
+
+ s := e.addBlock(src)
+
+ // This check isn't in the Snappy implementation, but there, the caller
+ // instead of the callee handles this case.
+ if len(src) < minNonLiteralBlockSize {
+ // We do not fill the token table.
+ // This will be picked up by caller.
+ dst.n = uint16(len(src))
+ return
+ }
+
+ // Override src
+ src = e.hist
+ nextEmit := s
+
+ // sLimit is when to stop looking for offset/length copies. The inputMargin
+ // lets us use a fast path for emitLiteral in the main loop, while we are
+ // looking for copies.
+ sLimit := int32(len(src) - inputMargin)
+
+ // nextEmit is where in src the next emitLiteral should start from.
+ cv := load3232(src, s)
+ for {
+ // How quickly to skip ahead when we haven't found matches in a long while.
+ const skipLog = 5
+ const doEvery = 2
+
+ nextS := s
+ var candidate tableEntry
+ for {
+ nextHash := hash4u(cv, bTableBits)
+ s = nextS
+ nextS = s + doEvery + (s-nextEmit)>>skipLog
+ if nextS > sLimit {
+ goto emitRemainder
+ }
+ candidate = e.table[nextHash]
+ now := load6432(src, nextS)
+ e.table[nextHash] = tableEntry{offset: s + e.cur, val: cv}
+ nextHash = hash4u(uint32(now), bTableBits)
+
+ offset := s - (candidate.offset - e.cur)
+ if offset < maxMatchOffset && cv == candidate.val {
+ e.table[nextHash] = tableEntry{offset: nextS + e.cur, val: uint32(now)}
+ break
+ }
+
+ // Do one right away...
+ cv = uint32(now)
+ s = nextS
+ nextS++
+ candidate = e.table[nextHash]
+ now >>= 8
+ e.table[nextHash] = tableEntry{offset: s + e.cur, val: cv}
+
+ offset = s - (candidate.offset - e.cur)
+ if offset < maxMatchOffset && cv == candidate.val {
+ break
+ }
+ cv = uint32(now)
+ }
+
+ // A 4-byte match has been found. We'll later see if more than 4 bytes
+ // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
+ // them as literal bytes.
+
+ // Call emitCopy, and then see if another emitCopy could be our next
+ // move. Repeat until we find no match for the input immediately after
+ // what was consumed by the last emitCopy call.
+ //
+ // If we exit this loop normally then we need to call emitLiteral next,
+ // though we don't yet know how big the literal will be. We handle that
+ // by proceeding to the next iteration of the main loop. We also can
+ // exit this loop via goto if we get close to exhausting the input.
+ for {
+ // Invariant: we have a 4-byte match at s, and no need to emit any
+ // literal bytes prior to s.
+
+ // Extend the 4-byte match as long as possible.
+ t := candidate.offset - e.cur
+ l := e.matchlenLong(s+4, t+4, src) + 4
+
+ // Extend backwards
+ for t > 0 && s > nextEmit && src[t-1] == src[s-1] {
+ s--
+ t--
+ l++
+ }
+ if nextEmit < s {
+ emitLiteral(dst, src[nextEmit:s])
+ }
+
+ dst.AddMatchLong(l, uint32(s-t-baseMatchOffset))
+ s += l
+ nextEmit = s
+ if nextS >= s {
+ s = nextS + 1
+ }
+
+ if s >= sLimit {
+ // Index first pair after match end.
+ if int(s+l+4) < len(src) {
+ cv := load3232(src, s)
+ e.table[hash4u(cv, bTableBits)] = tableEntry{offset: s + e.cur, val: cv}
+ }
+ goto emitRemainder
+ }
+
+ // Store every second hash in-between, but offset by 1.
+ for i := s - l + 2; i < s-5; i += 7 {
+ x := load6432(src, int32(i))
+ nextHash := hash4u(uint32(x), bTableBits)
+ e.table[nextHash] = tableEntry{offset: e.cur + i, val: uint32(x)}
+ // Skip one
+ x >>= 16
+ nextHash = hash4u(uint32(x), bTableBits)
+ e.table[nextHash] = tableEntry{offset: e.cur + i + 2, val: uint32(x)}
+ // Skip one
+ x >>= 16
+ nextHash = hash4u(uint32(x), bTableBits)
+ e.table[nextHash] = tableEntry{offset: e.cur + i + 4, val: uint32(x)}
+ }
+
+ // We could immediately start working at s now, but to improve
+ // compression we first update the hash table at s-2 to s. If
+ // another emitCopy is not our next move, also calculate nextHash
+ // at s+1. At least on GOARCH=amd64, these three hash calculations
+ // are faster as one load64 call (with some shifts) instead of
+ // three load32 calls.
+ x := load6432(src, s-2)
+ o := e.cur + s - 2
+ prevHash := hash4u(uint32(x), bTableBits)
+ prevHash2 := hash4u(uint32(x>>8), bTableBits)
+ e.table[prevHash] = tableEntry{offset: o, val: uint32(x)}
+ e.table[prevHash2] = tableEntry{offset: o + 1, val: uint32(x >> 8)}
+ currHash := hash4u(uint32(x>>16), bTableBits)
+ candidate = e.table[currHash]
+ e.table[currHash] = tableEntry{offset: o + 2, val: uint32(x >> 16)}
+
+ offset := s - (candidate.offset - e.cur)
+ if offset > maxMatchOffset || uint32(x>>16) != candidate.val {
+ cv = uint32(x >> 24)
+ s++
+ break
+ }
+ }
+ }
+
+emitRemainder:
+ if int(nextEmit) < len(src) {
+ // If nothing was added, don't encode literals.
+ if dst.n == 0 {
+ return
+ }
+
+ emitLiteral(dst, src[nextEmit:])
+ }
+}
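
Beyond the larger table, level 2 seeds extra positions between matches ("Store every second hash in-between, but offset by 1"), raising the odds that the next search finds a nearby candidate. A hedged sketch of that seeding loop over a toy position table, with hash4 as an illustrative stand-in for the library's hash4u:

package main

import (
	"encoding/binary"
	"fmt"
)

const tableBits = 14

// hash4 is an illustrative 4-byte multiplicative hash standing in for hash4u.
func hash4(u uint32) uint32 {
	return (u * 2654435761) >> (32 - tableBits)
}

func main() {
	src := []byte("the quick brown fox jumps over the lazy dog, twice over")
	var table [1 << tableBits]int32 // position table, as in fastEncL2
	// Seed positions i, i+2 and i+4 from one 8-byte load, stepping by 7,
	// mirroring the in-between indexing loop in level2.go.
	start, end := 2, 20
	for i := start; i < end-5; i += 7 {
		x := binary.LittleEndian.Uint64(src[i:])
		table[hash4(uint32(x))] = int32(i)
		table[hash4(uint32(x>>16))] = int32(i + 2)
		table[hash4(uint32(x>>32))] = int32(i + 4)
	}
	fmt.Println("seeded entries for positions", start, "through", end-1)
}
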
diff --git a/vendor/github.com/klauspost/compress/flate/level3.go b/vendor/github.com/klauspost/compress/flate/level3.go
new file mode 100644
index 000000000..4153d24c9
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/level3.go
@@ -0,0 +1,225 @@
+package flate
+
+// fastEncL3 is a level 3 encoder that keeps two candidates (current and previous) per table entry.
+type fastEncL3 struct {
+ fastGen
+ table [tableSize]tableEntryPrev
+}
+
+// Encode uses a similar algorithm to level 2, but checks up to two candidates.
+func (e *fastEncL3) Encode(dst *tokens, src []byte) {
+ const (
+ inputMargin = 8 - 1
+ minNonLiteralBlockSize = 1 + 1 + inputMargin
+ )
+
+ // Protect against e.cur wraparound.
+ for e.cur >= bufferReset {
+ if len(e.hist) == 0 {
+ for i := range e.table[:] {
+ e.table[i] = tableEntryPrev{}
+ }
+ e.cur = maxMatchOffset
+ break
+ }
+ // Shift down everything in the table that isn't already too far away.
+ minOff := e.cur + int32(len(e.hist)) - maxMatchOffset
+ for i := range e.table[:] {
+ v := e.table[i]
+ if v.Cur.offset <= minOff {
+ v.Cur.offset = 0
+ } else {
+ v.Cur.offset = v.Cur.offset - e.cur + maxMatchOffset
+ }
+ if v.Prev.offset <= minOff {
+ v.Prev.offset = 0
+ } else {
+ v.Prev.offset = v.Prev.offset - e.cur + maxMatchOffset
+ }
+ e.table[i] = v
+ }
+ e.cur = maxMatchOffset
+ }
+
+ s := e.addBlock(src)
+
+ // Skip if too small.
+ if len(src) < minNonLiteralBlockSize {
+ // We do not fill the token table.
+ // This will be picked up by caller.
+ dst.n = uint16(len(src))
+ return
+ }
+
+ // Override src
+ src = e.hist
+ nextEmit := s
+
+ // sLimit is when to stop looking for offset/length copies. The inputMargin
+ // lets us use a fast path for emitLiteral in the main loop, while we are
+ // looking for copies.
+ sLimit := int32(len(src) - inputMargin)
+
+ // nextEmit is where in src the next emitLiteral should start from.
+ cv := load3232(src, s)
+ for {
+ const skipLog = 6
+ nextS := s
+ var candidate tableEntry
+ for {
+ nextHash := hash(cv)
+ s = nextS
+ nextS = s + 1 + (s-nextEmit)>>skipLog
+ if nextS > sLimit {
+ goto emitRemainder
+ }
+ candidates := e.table[nextHash]
+ now := load3232(src, nextS)
+ e.table[nextHash] = tableEntryPrev{Prev: candidates.Cur, Cur: tableEntry{offset: s + e.cur, val: cv}}
+
+ // Check both candidates
+ candidate = candidates.Cur
+ offset := s - (candidate.offset - e.cur)
+ if cv == candidate.val {
+ if offset > maxMatchOffset {
+ cv = now
+ // Previous will also be invalid, we have nothing.
+ continue
+ }
+ o2 := s - (candidates.Prev.offset - e.cur)
+ if cv != candidates.Prev.val || o2 > maxMatchOffset {
+ break
+ }
+ // Both match and are valid, pick longest.
+ l1, l2 := matchLen(src[s+4:], src[s-offset+4:]), matchLen(src[s+4:], src[s-o2+4:])
+ if l2 > l1 {
+ candidate = candidates.Prev
+ }
+ break
+ } else {
+ // We only check if value mismatches.
+ // Offset will always be invalid in other cases.
+ candidate = candidates.Prev
+ if cv == candidate.val {
+ offset := s - (candidate.offset - e.cur)
+ if offset <= maxMatchOffset {
+ break
+ }
+ }
+ }
+ cv = now
+ }
+
+ // Call emitCopy, and then see if another emitCopy could be our next
+ // move. Repeat until we find no match for the input immediately after
+ // what was consumed by the last emitCopy call.
+ //
+ // If we exit this loop normally then we need to call emitLiteral next,
+ // though we don't yet know how big the literal will be. We handle that
+ // by proceeding to the next iteration of the main loop. We also can
+ // exit this loop via goto if we get close to exhausting the input.
+ for {
+ // Invariant: we have a 4-byte match at s, and no need to emit any
+ // literal bytes prior to s.
+
+ // Extend the 4-byte match as long as possible.
+ //
+ t := candidate.offset - e.cur
+ l := e.matchlenLong(s+4, t+4, src) + 4
+
+ // Extend backwards
+ for t > 0 && s > nextEmit && src[t-1] == src[s-1] {
+ s--
+ t--
+ l++
+ }
+ if nextEmit < s {
+ emitLiteral(dst, src[nextEmit:s])
+ }
+
+ dst.AddMatchLong(l, uint32(s-t-baseMatchOffset))
+ s += l
+ nextEmit = s
+ if nextS >= s {
+ s = nextS + 1
+ }
+
+ if s >= sLimit {
+ t += l
+ // Index first pair after match end.
+ if int(t+4) < len(src) && t > 0 {
+ cv := load3232(src, t)
+ nextHash := hash(cv)
+ e.table[nextHash] = tableEntryPrev{
+ Prev: e.table[nextHash].Cur,
+ Cur: tableEntry{offset: e.cur + t, val: cv},
+ }
+ }
+ goto emitRemainder
+ }
+
+ // We could immediately start working at s now, but to improve
+ // compression we first update the hash table at s-3 to s.
+ x := load6432(src, s-3)
+ prevHash := hash(uint32(x))
+ e.table[prevHash] = tableEntryPrev{
+ Prev: e.table[prevHash].Cur,
+ Cur: tableEntry{offset: e.cur + s - 3, val: uint32(x)},
+ }
+ x >>= 8
+ prevHash = hash(uint32(x))
+
+ e.table[prevHash] = tableEntryPrev{
+ Prev: e.table[prevHash].Cur,
+ Cur: tableEntry{offset: e.cur + s - 2, val: uint32(x)},
+ }
+ x >>= 8
+ prevHash = hash(uint32(x))
+
+ e.table[prevHash] = tableEntryPrev{
+ Prev: e.table[prevHash].Cur,
+ Cur: tableEntry{offset: e.cur + s - 1, val: uint32(x)},
+ }
+ x >>= 8
+ currHash := hash(uint32(x))
+ candidates := e.table[currHash]
+ cv = uint32(x)
+ e.table[currHash] = tableEntryPrev{
+ Prev: candidates.Cur,
+ Cur: tableEntry{offset: s + e.cur, val: cv},
+ }
+
+ // Check both candidates
+ candidate = candidates.Cur
+ if cv == candidate.val {
+ offset := s - (candidate.offset - e.cur)
+ if offset <= maxMatchOffset {
+ continue
+ }
+ } else {
+ // We only check if value mismatches.
+ // Offset will always be invalid in other cases.
+ candidate = candidates.Prev
+ if cv == candidate.val {
+ offset := s - (candidate.offset - e.cur)
+ if offset <= maxMatchOffset {
+ continue
+ }
+ }
+ }
+ cv = uint32(x >> 8)
+ s++
+ break
+ }
+ }
+
+emitRemainder:
+ if int(nextEmit) < len(src) {
+ // If nothing was added, don't encode literals.
+ if dst.n == 0 {
+ return
+ }
+
+ emitLiteral(dst, src[nextEmit:])
+ }
+}
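
level3.go keeps two candidates per bucket (tableEntryPrev holds Cur and Prev); when both are valid matches it compares their lengths and keeps the longer one. A minimal sketch of that pick-the-longer step, where matchLen is a simplified byte-wise compare rather than the library's optimized version:

package main

import "fmt"

type entry struct{ offset int }

// matchLen is a simplified byte-wise match length, standing in for
// the library's optimized matchLen.
func matchLen(a, b []byte) int {
	n := 0
	for n < len(a) && n < len(b) && a[n] == b[n] {
		n++
	}
	return n
}

// pickLonger mirrors the "Both match and are valid, pick longest"
// branch: given two candidate offsets, return the one with the
// longer continuation after the guaranteed 4-byte match at s.
func pickLonger(src []byte, s int, cur, prev entry) entry {
	l1 := matchLen(src[s+4:], src[cur.offset+4:])
	l2 := matchLen(src[s+4:], src[prev.offset+4:])
	if l2 > l1 {
		return prev
	}
	return cur
}

func main() {
	src := []byte("abcdefgh....abcdXXXX....abcdefgh!!")
	got := pickLonger(src, 24, entry{offset: 12}, entry{offset: 0})
	fmt.Println("chosen offset:", got.offset) // 0: longer continuation
}
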
diff --git a/vendor/github.com/klauspost/compress/flate/level4.go b/vendor/github.com/klauspost/compress/flate/level4.go
new file mode 100644
index 000000000..c689ac771
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/level4.go
@@ -0,0 +1,210 @@
+package flate
+
+import "fmt"
+
+type fastEncL4 struct {
+ fastGen
+ table [tableSize]tableEntry
+ bTable [tableSize]tableEntry
+}
+
+func (e *fastEncL4) Encode(dst *tokens, src []byte) {
+ const (
+ inputMargin = 12 - 1
+ minNonLiteralBlockSize = 1 + 1 + inputMargin
+ )
+
+ // Protect against e.cur wraparound.
+ for e.cur >= bufferReset {
+ if len(e.hist) == 0 {
+ for i := range e.table[:] {
+ e.table[i] = tableEntry{}
+ }
+ for i := range e.bTable[:] {
+ e.bTable[i] = tableEntry{}
+ }
+ e.cur = maxMatchOffset
+ break
+ }
+ // Shift down everything in the table that isn't already too far away.
+ minOff := e.cur + int32(len(e.hist)) - maxMatchOffset
+ for i := range e.table[:] {
+ v := e.table[i].offset
+ if v <= minOff {
+ v = 0
+ } else {
+ v = v - e.cur + maxMatchOffset
+ }
+ e.table[i].offset = v
+ }
+ for i := range e.bTable[:] {
+ v := e.bTable[i].offset
+ if v <= minOff {
+ v = 0
+ } else {
+ v = v - e.cur + maxMatchOffset
+ }
+ e.bTable[i].offset = v
+ }
+ e.cur = maxMatchOffset
+ }
+
+ s := e.addBlock(src)
+
+ // This check isn't in the Snappy implementation, but there, the caller
+ // instead of the callee handles this case.
+ if len(src) < minNonLiteralBlockSize {
+ // We do not fill the token table.
+ // This will be picked up by caller.
+ dst.n = uint16(len(src))
+ return
+ }
+
+ // Override src
+ src = e.hist
+ nextEmit := s
+
+ // sLimit is when to stop looking for offset/length copies. The inputMargin
+ // lets us use a fast path for emitLiteral in the main loop, while we are
+ // looking for copies.
+ sLimit := int32(len(src) - inputMargin)
+
+ // nextEmit is where in src the next emitLiteral should start from.
+ cv := load6432(src, s)
+ for {
+ const skipLog = 6
+ const doEvery = 1
+
+ nextS := s
+ var t int32
+ for {
+ nextHashS := hash4x64(cv, tableBits)
+ nextHashL := hash7(cv, tableBits)
+
+ s = nextS
+ nextS = s + doEvery + (s-nextEmit)>>skipLog
+ if nextS > sLimit {
+ goto emitRemainder
+ }
+ // Fetch a short+long candidate
+ sCandidate := e.table[nextHashS]
+ lCandidate := e.bTable[nextHashL]
+ next := load6432(src, nextS)
+ entry := tableEntry{offset: s + e.cur, val: uint32(cv)}
+ e.table[nextHashS] = entry
+ e.bTable[nextHashL] = entry
+
+ t = lCandidate.offset - e.cur
+ if s-t < maxMatchOffset && uint32(cv) == lCandidate.val {
+ // We got a long match. Use that.
+ break
+ }
+
+ t = sCandidate.offset - e.cur
+ if s-t < maxMatchOffset && uint32(cv) == sCandidate.val {
+ // Found a 4-byte match...
+ lCandidate = e.bTable[hash7(next, tableBits)]
+
+ // If the next long is a candidate, check if we should use that instead...
+ lOff := nextS - (lCandidate.offset - e.cur)
+ if lOff < maxMatchOffset && lCandidate.val == uint32(next) {
+ l1, l2 := matchLen(src[s+4:], src[t+4:]), matchLen(src[nextS+4:], src[nextS-lOff+4:])
+ if l2 > l1 {
+ s = nextS
+ t = lCandidate.offset - e.cur
+ }
+ }
+ break
+ }
+ cv = next
+ }
+
+ // A 4-byte match has been found. We'll later see if more than 4 bytes
+ // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
+ // them as literal bytes.
+
+ // Extend the 4-byte match as long as possible.
+ l := e.matchlenLong(s+4, t+4, src) + 4
+
+ // Extend backwards
+ for t > 0 && s > nextEmit && src[t-1] == src[s-1] {
+ s--
+ t--
+ l++
+ }
+ if nextEmit < s {
+ emitLiteral(dst, src[nextEmit:s])
+ }
+ if false {
+ if t >= s {
+ panic("s-t")
+ }
+ if (s - t) > maxMatchOffset {
+ panic(fmt.Sprintln("mmo", t))
+ }
+ if l < baseMatchLength {
+ panic("bml")
+ }
+ }
+
+ dst.AddMatchLong(l, uint32(s-t-baseMatchOffset))
+ s += l
+ nextEmit = s
+ if nextS >= s {
+ s = nextS + 1
+ }
+
+ if s >= sLimit {
+ // Index first pair after match end.
+ if int(s+8) < len(src) {
+ cv := load6432(src, s)
+ e.table[hash4x64(cv, tableBits)] = tableEntry{offset: s + e.cur, val: uint32(cv)}
+ e.bTable[hash7(cv, tableBits)] = tableEntry{offset: s + e.cur, val: uint32(cv)}
+ }
+ goto emitRemainder
+ }
+
+ // Store every 3rd hash in-between
+ if true {
+ i := nextS
+ if i < s-1 {
+ cv := load6432(src, i)
+ t := tableEntry{offset: i + e.cur, val: uint32(cv)}
+ t2 := tableEntry{val: uint32(cv >> 8), offset: t.offset + 1}
+ e.bTable[hash7(cv, tableBits)] = t
+ e.bTable[hash7(cv>>8, tableBits)] = t2
+ e.table[hash4u(t2.val, tableBits)] = t2
+
+ i += 3
+ for ; i < s-1; i += 3 {
+ cv := load6432(src, i)
+ t := tableEntry{offset: i + e.cur, val: uint32(cv)}
+ t2 := tableEntry{val: uint32(cv >> 8), offset: t.offset + 1}
+ e.bTable[hash7(cv, tableBits)] = t
+ e.bTable[hash7(cv>>8, tableBits)] = t2
+ e.table[hash4u(t2.val, tableBits)] = t2
+ }
+ }
+ }
+
+ // We could immediately start working at s now, but to improve
+ // compression we first update the hash table at s-1 and at s.
+ x := load6432(src, s-1)
+ o := e.cur + s - 1
+ prevHashS := hash4x64(x, tableBits)
+ prevHashL := hash7(x, tableBits)
+ e.table[prevHashS] = tableEntry{offset: o, val: uint32(x)}
+ e.bTable[prevHashL] = tableEntry{offset: o, val: uint32(x)}
+ cv = x >> 8
+ }
+
+emitRemainder:
+ if int(nextEmit) < len(src) {
+ // If nothing was added, don't encode literals.
+ if dst.n == 0 {
+ return
+ }
+
+ emitLiteral(dst, src[nextEmit:])
+ }
+}
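
Level 4 pairs the short table with a second table keyed on more input bytes (bTable via hash7), and prefers its candidate since a long-hash hit usually implies a longer match. A sketch of the dual lookup under assumed hash functions, where hashShort and hashLong stand in for hash4x64 and hash7:

package main

import (
	"encoding/binary"
	"fmt"
)

const tableBits = 14

// Illustrative stand-ins for the library's hash4x64 and hash7:
// hashShort keys on the low 4 bytes, hashLong on the low 7 bytes.
func hashShort(u uint64) uint32 { return uint32((u&0xffffffff)*2654435761) >> (32 - tableBits) }
func hashLong(u uint64) uint32  { return uint32(((u << 8) * 0x9E3779B185EBCA87) >> (64 - tableBits)) }

func main() {
	src := []byte("repeat me, repeat me, repeat me, and then some padding")
	var short, long [1 << tableBits]int32
	s := 22 // position of the third "repeat me"
	cv := binary.LittleEndian.Uint64(src[s:])
	// Index an earlier occurrence, then show both tables hit it.
	prev := 11
	pcv := binary.LittleEndian.Uint64(src[prev:])
	short[hashShort(pcv)] = int32(prev)
	long[hashLong(pcv)] = int32(prev)
	fmt.Println("long-table candidate :", long[hashLong(cv)])   // 11
	fmt.Println("short-table candidate:", short[hashShort(cv)]) // 11
}
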
diff --git a/vendor/github.com/klauspost/compress/flate/level5.go b/vendor/github.com/klauspost/compress/flate/level5.go
new file mode 100644
index 000000000..14a235612
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/level5.go
@@ -0,0 +1,276 @@
+package flate
+
+import "fmt"
+
+type fastEncL5 struct {
+ fastGen
+ table [tableSize]tableEntry
+ bTable [tableSize]tableEntryPrev
+}
+
+func (e *fastEncL5) Encode(dst *tokens, src []byte) {
+ const (
+ inputMargin = 12 - 1
+ minNonLiteralBlockSize = 1 + 1 + inputMargin
+ )
+
+ // Protect against e.cur wraparound.
+ for e.cur >= bufferReset {
+ if len(e.hist) == 0 {
+ for i := range e.table[:] {
+ e.table[i] = tableEntry{}
+ }
+ for i := range e.bTable[:] {
+ e.bTable[i] = tableEntryPrev{}
+ }
+ e.cur = maxMatchOffset
+ break
+ }
+ // Shift down everything in the table that isn't already too far away.
+ minOff := e.cur + int32(len(e.hist)) - maxMatchOffset
+ for i := range e.table[:] {
+ v := e.table[i].offset
+ if v <= minOff {
+ v = 0
+ } else {
+ v = v - e.cur + maxMatchOffset
+ }
+ e.table[i].offset = v
+ }
+ for i := range e.bTable[:] {
+ v := e.bTable[i]
+ if v.Cur.offset <= minOff {
+ v.Cur.offset = 0
+ v.Prev.offset = 0
+ } else {
+ v.Cur.offset = v.Cur.offset - e.cur + maxMatchOffset
+ if v.Prev.offset <= minOff {
+ v.Prev.offset = 0
+ } else {
+ v.Prev.offset = v.Prev.offset - e.cur + maxMatchOffset
+ }
+ }
+ e.bTable[i] = v
+ }
+ e.cur = maxMatchOffset
+ }
+
+ s := e.addBlock(src)
+
+ // This check isn't in the Snappy implementation, but there, the caller
+ // instead of the callee handles this case.
+ if len(src) < minNonLiteralBlockSize {
+ // We do not fill the token table.
+ // This will be picked up by caller.
+ dst.n = uint16(len(src))
+ return
+ }
+
+ // Override src
+ src = e.hist
+ nextEmit := s
+
+ // sLimit is when to stop looking for offset/length copies. The inputMargin
+ // lets us use a fast path for emitLiteral in the main loop, while we are
+ // looking for copies.
+ sLimit := int32(len(src) - inputMargin)
+
+ // nextEmit is where in src the next emitLiteral should start from.
+ cv := load6432(src, s)
+ for {
+ const skipLog = 6
+ const doEvery = 1
+
+ nextS := s
+ var l int32
+ var t int32
+ for {
+ nextHashS := hash4x64(cv, tableBits)
+ nextHashL := hash7(cv, tableBits)
+
+ s = nextS
+ nextS = s + doEvery + (s-nextEmit)>>skipLog
+ if nextS > sLimit {
+ goto emitRemainder
+ }
+ // Fetch a short+long candidate
+ sCandidate := e.table[nextHashS]
+ lCandidate := e.bTable[nextHashL]
+ next := load6432(src, nextS)
+ entry := tableEntry{offset: s + e.cur, val: uint32(cv)}
+ e.table[nextHashS] = entry
+ eLong := &e.bTable[nextHashL]
+ eLong.Cur, eLong.Prev = entry, eLong.Cur
+
+ nextHashS = hash4x64(next, tableBits)
+ nextHashL = hash7(next, tableBits)
+
+ t = lCandidate.Cur.offset - e.cur
+ if s-t < maxMatchOffset {
+ if uint32(cv) == lCandidate.Cur.val {
+ // Store the next match
+ e.table[nextHashS] = tableEntry{offset: nextS + e.cur, val: uint32(next)}
+ eLong := &e.bTable[nextHashL]
+ eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur, val: uint32(next)}, eLong.Cur
+
+ t2 := lCandidate.Prev.offset - e.cur
+ if s-t2 < maxMatchOffset && uint32(cv) == lCandidate.Prev.val {
+ l = e.matchlen(s+4, t+4, src) + 4
+ ml1 := e.matchlen(s+4, t2+4, src) + 4
+ if ml1 > l {
+ t = t2
+ l = ml1
+ break
+ }
+ }
+ break
+ }
+ t = lCandidate.Prev.offset - e.cur
+ if s-t < maxMatchOffset && uint32(cv) == lCandidate.Prev.val {
+ // Store the next match
+ e.table[nextHashS] = tableEntry{offset: nextS + e.cur, val: uint32(next)}
+ eLong := &e.bTable[nextHashL]
+ eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur, val: uint32(next)}, eLong.Cur
+ break
+ }
+ }
+
+ t = sCandidate.offset - e.cur
+ if s-t < maxMatchOffset && uint32(cv) == sCandidate.val {
+ // Found a 4-byte match...
+ l = e.matchlen(s+4, t+4, src) + 4
+ lCandidate = e.bTable[nextHashL]
+ // Store the next match
+
+ e.table[nextHashS] = tableEntry{offset: nextS + e.cur, val: uint32(next)}
+ eLong := &e.bTable[nextHashL]
+ eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur, val: uint32(next)}, eLong.Cur
+
+ // If the next long is a candidate, use that...
+ t2 := lCandidate.Cur.offset - e.cur
+ if nextS-t2 < maxMatchOffset {
+ if lCandidate.Cur.val == uint32(next) {
+ ml := e.matchlen(nextS+4, t2+4, src) + 4
+ if ml > l {
+ t = t2
+ s = nextS
+ l = ml
+ break
+ }
+ }
+ // If the previous long is a candidate, use that...
+ t2 = lCandidate.Prev.offset - e.cur
+ if nextS-t2 < maxMatchOffset && lCandidate.Prev.val == uint32(next) {
+ ml := e.matchlen(nextS+4, t2+4, src) + 4
+ if ml > l {
+ t = t2
+ s = nextS
+ l = ml
+ break
+ }
+ }
+ }
+ break
+ }
+ cv = next
+ }
+
+ // A 4-byte match has been found. We'll later see if more than 4 bytes
+ // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
+ // them as literal bytes.
+
+ // Extend the 4-byte match as long as possible.
+ if l == 0 {
+ l = e.matchlenLong(s+4, t+4, src) + 4
+ } else if l == maxMatchLength {
+ l += e.matchlenLong(s+l, t+l, src)
+ }
+ // Extend backwards
+ for t > 0 && s > nextEmit && src[t-1] == src[s-1] {
+ s--
+ t--
+ l++
+ }
+ if nextEmit < s {
+ emitLiteral(dst, src[nextEmit:s])
+ }
+ if false {
+ if t >= s {
+ panic(fmt.Sprintln("s-t", s, t))
+ }
+ if (s - t) > maxMatchOffset {
+ panic(fmt.Sprintln("mmo", s-t))
+ }
+ if l < baseMatchLength {
+ panic("bml")
+ }
+ }
+
+ dst.AddMatchLong(l, uint32(s-t-baseMatchOffset))
+ s += l
+ nextEmit = s
+ if nextS >= s {
+ s = nextS + 1
+ }
+
+ if s >= sLimit {
+ goto emitRemainder
+ }
+
+ // Store every 3rd hash in-between.
+ if true {
+ const hashEvery = 3
+ i := s - l + 1
+ if i < s-1 {
+ cv := load6432(src, i)
+ t := tableEntry{offset: i + e.cur, val: uint32(cv)}
+ e.table[hash4x64(cv, tableBits)] = t
+ eLong := &e.bTable[hash7(cv, tableBits)]
+ eLong.Cur, eLong.Prev = t, eLong.Cur
+
+ // Add a long-table entry at i+1
+ cv >>= 8
+ t = tableEntry{offset: t.offset + 1, val: uint32(cv)}
+ eLong = &e.bTable[hash7(cv, tableBits)]
+ eLong.Cur, eLong.Prev = t, eLong.Cur
+
+ // We only have enough bits for a short entry at i+2
+ cv >>= 8
+ t = tableEntry{offset: t.offset + 1, val: uint32(cv)}
+ e.table[hash4x64(cv, tableBits)] = t
+
+ // Skip one - otherwise we risk hitting 's'
+ i += 4
+ for ; i < s-1; i += hashEvery {
+ cv := load6432(src, i)
+ t := tableEntry{offset: i + e.cur, val: uint32(cv)}
+ t2 := tableEntry{offset: t.offset + 1, val: uint32(cv >> 8)}
+ eLong := &e.bTable[hash7(cv, tableBits)]
+ eLong.Cur, eLong.Prev = t, eLong.Cur
+ e.table[hash4u(t2.val, tableBits)] = t2
+ }
+ }
+ }
+
+ // We could immediately start working at s now, but to improve
+ // compression we first update the hash table at s-1 and at s.
+ x := load6432(src, s-1)
+ o := e.cur + s - 1
+ prevHashS := hash4x64(x, tableBits)
+ prevHashL := hash7(x, tableBits)
+ e.table[prevHashS] = tableEntry{offset: o, val: uint32(x)}
+ eLong := &e.bTable[prevHashL]
+ eLong.Cur, eLong.Prev = tableEntry{offset: o, val: uint32(x)}, eLong.Cur
+ cv = x >> 8
+ }
+
+emitRemainder:
+ if int(nextEmit) < len(src) {
+ // If nothing was added, don't encode literals.
+ if dst.n == 0 {
+ return
+ }
+
+ emitLiteral(dst, src[nextEmit:])
+ }
+}
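
Both long-table variants shift their bucket with a single tuple assignment, eLong.Cur, eLong.Prev = entry, eLong.Cur, demoting the old current entry to Prev without a temporary. The same idiom in isolation:

package main

import "fmt"

type tableEntry struct {
	offset int32
	val    uint32
}

type tableEntryPrev struct {
	Cur, Prev tableEntry
}

func main() {
	var bucket tableEntryPrev
	for i := int32(1); i <= 3; i++ {
		e := tableEntry{offset: i, val: uint32(i * 100)}
		// Shift Cur into Prev and install the new entry in one statement,
		// as done for eLong in level5.go and level6.go above.
		bucket.Cur, bucket.Prev = e, bucket.Cur
		fmt.Printf("after insert %d: Cur=%d Prev=%d\n", i, bucket.Cur.offset, bucket.Prev.offset)
	}
}
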
diff --git a/vendor/github.com/klauspost/compress/flate/level6.go b/vendor/github.com/klauspost/compress/flate/level6.go
new file mode 100644
index 000000000..cad0c7df7
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/level6.go
@@ -0,0 +1,279 @@
+package flate
+
+import "fmt"
+
+type fastEncL6 struct {
+ fastGen
+ table [tableSize]tableEntry
+ bTable [tableSize]tableEntryPrev
+}
+
+func (e *fastEncL6) Encode(dst *tokens, src []byte) {
+ const (
+ inputMargin = 12 - 1
+ minNonLiteralBlockSize = 1 + 1 + inputMargin
+ )
+
+ // Protect against e.cur wraparound.
+ for e.cur >= bufferReset {
+ if len(e.hist) == 0 {
+ for i := range e.table[:] {
+ e.table[i] = tableEntry{}
+ }
+ for i := range e.bTable[:] {
+ e.bTable[i] = tableEntryPrev{}
+ }
+ e.cur = maxMatchOffset
+ break
+ }
+ // Shift down everything in the table that isn't already too far away.
+ minOff := e.cur + int32(len(e.hist)) - maxMatchOffset
+ for i := range e.table[:] {
+ v := e.table[i].offset
+ if v <= minOff {
+ v = 0
+ } else {
+ v = v - e.cur + maxMatchOffset
+ }
+ e.table[i].offset = v
+ }
+ for i := range e.bTable[:] {
+ v := e.bTable[i]
+ if v.Cur.offset <= minOff {
+ v.Cur.offset = 0
+ v.Prev.offset = 0
+ } else {
+ v.Cur.offset = v.Cur.offset - e.cur + maxMatchOffset
+ if v.Prev.offset <= minOff {
+ v.Prev.offset = 0
+ } else {
+ v.Prev.offset = v.Prev.offset - e.cur + maxMatchOffset
+ }
+ }
+ e.bTable[i] = v
+ }
+ e.cur = maxMatchOffset
+ }
+
+ s := e.addBlock(src)
+
+ // This check isn't in the Snappy implementation, but there, the caller
+ // instead of the callee handles this case.
+ if len(src) < minNonLiteralBlockSize {
+ // We do not fill the token table.
+ // This will be picked up by caller.
+ dst.n = uint16(len(src))
+ return
+ }
+
+ // Override src
+ src = e.hist
+ nextEmit := s
+
+ // sLimit is when to stop looking for offset/length copies. The inputMargin
+ // lets us use a fast path for emitLiteral in the main loop, while we are
+ // looking for copies.
+ sLimit := int32(len(src) - inputMargin)
+
+ // nextEmit is where in src the next emitLiteral should start from.
+ cv := load6432(src, s)
+ // Repeat MUST be > 1 and within range
+ repeat := int32(1)
+ for {
+ const skipLog = 7
+ const doEvery = 1
+
+ nextS := s
+ var l int32
+ var t int32
+ for {
+ nextHashS := hash4x64(cv, tableBits)
+ nextHashL := hash7(cv, tableBits)
+ s = nextS
+ nextS = s + doEvery + (s-nextEmit)>>skipLog
+ if nextS > sLimit {
+ goto emitRemainder
+ }
+ // Fetch a short+long candidate
+ sCandidate := e.table[nextHashS]
+ lCandidate := e.bTable[nextHashL]
+ next := load6432(src, nextS)
+ entry := tableEntry{offset: s + e.cur, val: uint32(cv)}
+ e.table[nextHashS] = entry
+ eLong := &e.bTable[nextHashL]
+ eLong.Cur, eLong.Prev = entry, eLong.Cur
+
+ // Calculate hashes of 'next'
+ nextHashS = hash4x64(next, tableBits)
+ nextHashL = hash7(next, tableBits)
+
+ t = lCandidate.Cur.offset - e.cur
+ if s-t < maxMatchOffset {
+ if uint32(cv) == lCandidate.Cur.val {
+ // Long candidate matches at least 4 bytes.
+
+ // Store the next match
+ e.table[nextHashS] = tableEntry{offset: nextS + e.cur, val: uint32(next)}
+ eLong := &e.bTable[nextHashL]
+ eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur, val: uint32(next)}, eLong.Cur
+
+ // Check the previous long candidate as well.
+ t2 := lCandidate.Prev.offset - e.cur
+ if s-t2 < maxMatchOffset && uint32(cv) == lCandidate.Prev.val {
+ l = e.matchlen(s+4, t+4, src) + 4
+ ml1 := e.matchlen(s+4, t2+4, src) + 4
+ if ml1 > l {
+ t = t2
+ l = ml1
+ break
+ }
+ }
+ break
+ }
+ // Current value did not match, but check if previous long value does.
+ t = lCandidate.Prev.offset - e.cur
+ if s-t < maxMatchOffset && uint32(cv) == lCandidate.Prev.val {
+ // Store the next match
+ e.table[nextHashS] = tableEntry{offset: nextS + e.cur, val: uint32(next)}
+ eLong := &e.bTable[nextHashL]
+ eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur, val: uint32(next)}, eLong.Cur
+ break
+ }
+ }
+
+ t = sCandidate.offset - e.cur
+ if s-t < maxMatchOffset && uint32(cv) == sCandidate.val {
+ // Found a 4-byte match...
+ l = e.matchlen(s+4, t+4, src) + 4
+
+ // Look up next long candidate (at nextS)
+ lCandidate = e.bTable[nextHashL]
+
+ // Store the next match
+ e.table[nextHashS] = tableEntry{offset: nextS + e.cur, val: uint32(next)}
+ eLong := &e.bTable[nextHashL]
+ eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur, val: uint32(next)}, eLong.Cur
+
+ // Check repeat at s + repOff
+ const repOff = 1
+ t2 := s - repeat + repOff
+ if load3232(src, t2) == uint32(cv>>(8*repOff)) {
+ ml := e.matchlen(s+4+repOff, t2+4, src) + 4
+ if ml > l {
+ t = t2
+ l = ml
+ s += repOff
+ // Not worth checking more.
+ break
+ }
+ }
+
+ // If the next long is a candidate, use that...
+ t2 = lCandidate.Cur.offset - e.cur
+ if nextS-t2 < maxMatchOffset {
+ if lCandidate.Cur.val == uint32(next) {
+ ml := e.matchlen(nextS+4, t2+4, src) + 4
+ if ml > l {
+ t = t2
+ s = nextS
+ l = ml
+ // This is ok, but check previous as well.
+ }
+ }
+ // If the previous long is a candidate, use that...
+ t2 = lCandidate.Prev.offset - e.cur
+ if nextS-t2 < maxMatchOffset && lCandidate.Prev.val == uint32(next) {
+ ml := e.matchlen(nextS+4, t2+4, src) + 4
+ if ml > l {
+ t = t2
+ s = nextS
+ l = ml
+ break
+ }
+ }
+ }
+ break
+ }
+ cv = next
+ }
+
+ // A 4-byte match has been found. We'll later see if more than 4 bytes
+ // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
+ // them as literal bytes.
+
+ // Extend the 4-byte match as long as possible.
+ if l == 0 {
+ l = e.matchlenLong(s+4, t+4, src) + 4
+ } else if l == maxMatchLength {
+ l += e.matchlenLong(s+l, t+l, src)
+ }
+
+ // Extend backwards
+ for t > 0 && s > nextEmit && src[t-1] == src[s-1] {
+ s--
+ t--
+ l++
+ }
+ if nextEmit < s {
+ emitLiteral(dst, src[nextEmit:s])
+ }
+ if false {
+ if t >= s {
+ panic(fmt.Sprintln("s-t", s, t))
+ }
+ if (s - t) > maxMatchOffset {
+ panic(fmt.Sprintln("mmo", s-t))
+ }
+ if l < baseMatchLength {
+ panic("bml")
+ }
+ }
+
+ dst.AddMatchLong(l, uint32(s-t-baseMatchOffset))
+ repeat = s - t
+ s += l
+ nextEmit = s
+ if nextS >= s {
+ s = nextS + 1
+ }
+
+ if s >= sLimit {
+ // Index after match end.
+ for i := nextS + 1; i < int32(len(src))-8; i += 2 {
+ cv := load6432(src, i)
+ e.table[hash4x64(cv, tableBits)] = tableEntry{offset: i + e.cur, val: uint32(cv)}
+ eLong := &e.bTable[hash7(cv, tableBits)]
+ eLong.Cur, eLong.Prev = tableEntry{offset: i + e.cur, val: uint32(cv)}, eLong.Cur
+ }
+ goto emitRemainder
+ }
+
+ // Store every long hash in-between and every second short.
+ if true {
+ for i := nextS + 1; i < s-1; i += 2 {
+ cv := load6432(src, i)
+ t := tableEntry{offset: i + e.cur, val: uint32(cv)}
+ t2 := tableEntry{offset: t.offset + 1, val: uint32(cv >> 8)}
+ eLong := &e.bTable[hash7(cv, tableBits)]
+ eLong2 := &e.bTable[hash7(cv>>8, tableBits)]
+ e.table[hash4x64(cv, tableBits)] = t
+ eLong.Cur, eLong.Prev = t, eLong.Cur
+ eLong2.Cur, eLong2.Prev = t2, eLong2.Cur
+ }
+ }
+
+ // We could immediately start working at s now, but to improve
+ // compression we first update the hash table at s-1 and at s.
+ cv = load6432(src, s)
+ }
+
+emitRemainder:
+ if int(nextEmit) < len(src) {
+ // If nothing was added, don't encode literals.
+ if dst.n == 0 {
+ return
+ }
+
+ emitLiteral(dst, src[nextEmit:])
+ }
+}
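
New to level 6 is the repeat-offset probe: after each match the distance is remembered (repeat = s - t), and the next search first tests whether that same distance matches again one byte ahead, which is cheap and fires often on structured data. A small sketch of the probe, with repeatMatches as an illustrative helper:

package main

import (
	"encoding/binary"
	"fmt"
)

// repeatMatches mirrors the "Check repeat at s + repOff" probe in
// level6.go: does the last match distance also match 4 bytes at s+1?
func repeatMatches(src []byte, s, repeat int) bool {
	const repOff = 1
	t := s - repeat + repOff
	if t < 0 || s+repOff+4 > len(src) {
		return false
	}
	return binary.LittleEndian.Uint32(src[t:]) ==
		binary.LittleEndian.Uint32(src[s+repOff:])
}

func main() {
	// In "abcabcabc...", after matching at distance 3 the same distance
	// keeps matching, so the repeat probe fires immediately.
	src := []byte("abcabcabcabcabc")
	fmt.Println(repeatMatches(src, 6, 3)) // true
}
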
diff --git a/vendor/github.com/klauspost/compress/flate/reverse_bits.go b/vendor/github.com/klauspost/compress/flate/reverse_bits.go
deleted file mode 100644
index c1a02720d..000000000
--- a/vendor/github.com/klauspost/compress/flate/reverse_bits.go
+++ /dev/null
@@ -1,48 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package flate
-
-var reverseByte = [256]byte{
- 0x00, 0x80, 0x40, 0xc0, 0x20, 0xa0, 0x60, 0xe0,
- 0x10, 0x90, 0x50, 0xd0, 0x30, 0xb0, 0x70, 0xf0,
- 0x08, 0x88, 0x48, 0xc8, 0x28, 0xa8, 0x68, 0xe8,
- 0x18, 0x98, 0x58, 0xd8, 0x38, 0xb8, 0x78, 0xf8,
- 0x04, 0x84, 0x44, 0xc4, 0x24, 0xa4, 0x64, 0xe4,
- 0x14, 0x94, 0x54, 0xd4, 0x34, 0xb4, 0x74, 0xf4,
- 0x0c, 0x8c, 0x4c, 0xcc, 0x2c, 0xac, 0x6c, 0xec,
- 0x1c, 0x9c, 0x5c, 0xdc, 0x3c, 0xbc, 0x7c, 0xfc,
- 0x02, 0x82, 0x42, 0xc2, 0x22, 0xa2, 0x62, 0xe2,
- 0x12, 0x92, 0x52, 0xd2, 0x32, 0xb2, 0x72, 0xf2,
- 0x0a, 0x8a, 0x4a, 0xca, 0x2a, 0xaa, 0x6a, 0xea,
- 0x1a, 0x9a, 0x5a, 0xda, 0x3a, 0xba, 0x7a, 0xfa,
- 0x06, 0x86, 0x46, 0xc6, 0x26, 0xa6, 0x66, 0xe6,
- 0x16, 0x96, 0x56, 0xd6, 0x36, 0xb6, 0x76, 0xf6,
- 0x0e, 0x8e, 0x4e, 0xce, 0x2e, 0xae, 0x6e, 0xee,
- 0x1e, 0x9e, 0x5e, 0xde, 0x3e, 0xbe, 0x7e, 0xfe,
- 0x01, 0x81, 0x41, 0xc1, 0x21, 0xa1, 0x61, 0xe1,
- 0x11, 0x91, 0x51, 0xd1, 0x31, 0xb1, 0x71, 0xf1,
- 0x09, 0x89, 0x49, 0xc9, 0x29, 0xa9, 0x69, 0xe9,
- 0x19, 0x99, 0x59, 0xd9, 0x39, 0xb9, 0x79, 0xf9,
- 0x05, 0x85, 0x45, 0xc5, 0x25, 0xa5, 0x65, 0xe5,
- 0x15, 0x95, 0x55, 0xd5, 0x35, 0xb5, 0x75, 0xf5,
- 0x0d, 0x8d, 0x4d, 0xcd, 0x2d, 0xad, 0x6d, 0xed,
- 0x1d, 0x9d, 0x5d, 0xdd, 0x3d, 0xbd, 0x7d, 0xfd,
- 0x03, 0x83, 0x43, 0xc3, 0x23, 0xa3, 0x63, 0xe3,
- 0x13, 0x93, 0x53, 0xd3, 0x33, 0xb3, 0x73, 0xf3,
- 0x0b, 0x8b, 0x4b, 0xcb, 0x2b, 0xab, 0x6b, 0xeb,
- 0x1b, 0x9b, 0x5b, 0xdb, 0x3b, 0xbb, 0x7b, 0xfb,
- 0x07, 0x87, 0x47, 0xc7, 0x27, 0xa7, 0x67, 0xe7,
- 0x17, 0x97, 0x57, 0xd7, 0x37, 0xb7, 0x77, 0xf7,
- 0x0f, 0x8f, 0x4f, 0xcf, 0x2f, 0xaf, 0x6f, 0xef,
- 0x1f, 0x9f, 0x5f, 0xdf, 0x3f, 0xbf, 0x7f, 0xff,
-}
-
-func reverseUint16(v uint16) uint16 {
- return uint16(reverseByte[v>>8]) | uint16(reverseByte[v&0xFF])<<8
-}
-
-func reverseBits(number uint16, bitLength byte) uint16 {
- return reverseUint16(number << uint8(16-bitLength))
-}
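
The deleted table-driven reverseBits is superseded by the math/bits version added to huffman_code.go above; the two agree for all 16-bit inputs. A quick self-contained check of the new form:

package main

import (
	"fmt"
	"math/bits"
)

// reverseBitsNew is the replacement added to huffman_code.go above:
// shift the code into the high bits, then reverse the whole word.
func reverseBitsNew(number uint16, bitLength byte) uint16 {
	return bits.Reverse16(number << ((16 - bitLength) & 15))
}

func main() {
	// Reverse the low 5 bits of 0b10110 -> 0b01101.
	fmt.Printf("%05b\n", reverseBitsNew(0b10110, 5)) // 01101
}
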
diff --git a/vendor/github.com/klauspost/compress/flate/snappy.go b/vendor/github.com/klauspost/compress/flate/snappy.go
deleted file mode 100644
index aebebd524..000000000
--- a/vendor/github.com/klauspost/compress/flate/snappy.go
+++ /dev/null
@@ -1,900 +0,0 @@
-// Copyright 2011 The Snappy-Go Authors. All rights reserved.
-// Modified for deflate by Klaus Post (c) 2015.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package flate
-
-// emitLiteral writes a literal chunk and returns the number of bytes written.
-func emitLiteral(dst *tokens, lit []byte) {
- ol := int(dst.n)
- for i, v := range lit {
- dst.tokens[(i+ol)&maxStoreBlockSize] = token(v)
- }
- dst.n += uint16(len(lit))
-}
-
-// emitCopy writes a copy chunk and returns the number of bytes written.
-func emitCopy(dst *tokens, offset, length int) {
- dst.tokens[dst.n] = matchToken(uint32(length-3), uint32(offset-minOffsetSize))
- dst.n++
-}
-
-type fastEnc interface {
- Encode(dst *tokens, src []byte)
- Reset()
-}
-
-func newFastEnc(level int) fastEnc {
- switch level {
- case 1:
- return &snappyL1{}
- case 2:
- return &snappyL2{snappyGen: snappyGen{cur: maxStoreBlockSize, prev: make([]byte, 0, maxStoreBlockSize)}}
- case 3:
- return &snappyL3{snappyGen: snappyGen{cur: maxStoreBlockSize, prev: make([]byte, 0, maxStoreBlockSize)}}
- case 4:
- return &snappyL4{snappyL3{snappyGen: snappyGen{cur: maxStoreBlockSize, prev: make([]byte, 0, maxStoreBlockSize)}}}
- default:
- panic("invalid level specified")
- }
-}
-
-const (
- tableBits = 14 // Bits used in the table
- tableSize = 1 << tableBits // Size of the table
- tableMask = tableSize - 1 // Mask for table indices. Redundant, but can eliminate bounds checks.
- tableShift = 32 - tableBits // Right-shift to get the tableBits most significant bits of a uint32.
- baseMatchOffset = 1 // The smallest match offset
- baseMatchLength = 3 // The smallest match length per the RFC section 3.2.5
- maxMatchOffset = 1 << 15 // The largest match offset
-)
-
-func load32(b []byte, i int) uint32 {
- b = b[i : i+4 : len(b)] // Help the compiler eliminate bounds checks on the next line.
- return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
-}
-
-func load64(b []byte, i int) uint64 {
- b = b[i : i+8 : len(b)] // Help the compiler eliminate bounds checks on the next line.
- return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 |
- uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
-}
-
-func hash(u uint32) uint32 {
- return (u * 0x1e35a7bd) >> tableShift
-}
-
-// snappyL1 encapsulates level 1 compression
-type snappyL1 struct{}
-
-func (e *snappyL1) Reset() {}
-
-func (e *snappyL1) Encode(dst *tokens, src []byte) {
- const (
- inputMargin = 16 - 1
- minNonLiteralBlockSize = 1 + 1 + inputMargin
- )
-
- // This check isn't in the Snappy implementation, but there, the caller
- // instead of the callee handles this case.
- if len(src) < minNonLiteralBlockSize {
- // We do not fill the token table.
- // This will be picked up by caller.
- dst.n = uint16(len(src))
- return
- }
-
- // Initialize the hash table.
- //
- // The table element type is uint16, as s < sLimit and sLimit < len(src)
- // and len(src) <= maxStoreBlockSize and maxStoreBlockSize == 65535.
- var table [tableSize]uint16
-
- // sLimit is when to stop looking for offset/length copies. The inputMargin
- // lets us use a fast path for emitLiteral in the main loop, while we are
- // looking for copies.
- sLimit := len(src) - inputMargin
-
- // nextEmit is where in src the next emitLiteral should start from.
- nextEmit := 0
-
- // The encoded form must start with a literal, as there are no previous
- // bytes to copy, so we start looking for hash matches at s == 1.
- s := 1
- nextHash := hash(load32(src, s))
-
- for {
- // Copied from the C++ snappy implementation:
- //
- // Heuristic match skipping: If 32 bytes are scanned with no matches
- // found, start looking only at every other byte. If 32 more bytes are
- // scanned (or skipped), look at every third byte, etc.. When a match
- // is found, immediately go back to looking at every byte. This is a
- // small loss (~5% performance, ~0.1% density) for compressible data
- // due to more bookkeeping, but for non-compressible data (such as
- // JPEG) it's a huge win since the compressor quickly "realizes" the
- // data is incompressible and doesn't bother looking for matches
- // everywhere.
- //
- // The "skip" variable keeps track of how many bytes there are since
- // the last match; dividing it by 32 (ie. right-shifting by five) gives
- // the number of bytes to move ahead for each iteration.
- skip := 32
-
- nextS := s
- candidate := 0
- for {
- s = nextS
- bytesBetweenHashLookups := skip >> 5
- nextS = s + bytesBetweenHashLookups
- skip += bytesBetweenHashLookups
- if nextS > sLimit {
- goto emitRemainder
- }
- candidate = int(table[nextHash&tableMask])
- table[nextHash&tableMask] = uint16(s)
- nextHash = hash(load32(src, nextS))
- if s-candidate <= maxMatchOffset && load32(src, s) == load32(src, candidate) {
- break
- }
- }
-
- // A 4-byte match has been found. We'll later see if more than 4 bytes
- // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
- // them as literal bytes.
- emitLiteral(dst, src[nextEmit:s])
-
- // Call emitCopy, and then see if another emitCopy could be our next
- // move. Repeat until we find no match for the input immediately after
- // what was consumed by the last emitCopy call.
- //
- // If we exit this loop normally then we need to call emitLiteral next,
- // though we don't yet know how big the literal will be. We handle that
- // by proceeding to the next iteration of the main loop. We also can
- // exit this loop via goto if we get close to exhausting the input.
- for {
- // Invariant: we have a 4-byte match at s, and no need to emit any
- // literal bytes prior to s.
- base := s
-
- // Extend the 4-byte match as long as possible.
- //
- // This is an inlined version of Snappy's:
- // s = extendMatch(src, candidate+4, s+4)
- s += 4
- s1 := base + maxMatchLength
- if s1 > len(src) {
- s1 = len(src)
- }
- a := src[s:s1]
- b := src[candidate+4:]
- b = b[:len(a)]
- l := len(a)
- for i := range a {
- if a[i] != b[i] {
- l = i
- break
- }
- }
- s += l
-
- // matchToken is flate's equivalent of Snappy's emitCopy.
- dst.tokens[dst.n] = matchToken(uint32(s-base-baseMatchLength), uint32(base-candidate-baseMatchOffset))
- dst.n++
- nextEmit = s
- if s >= sLimit {
- goto emitRemainder
- }
-
- // We could immediately start working at s now, but to improve
- // compression we first update the hash table at s-1 and at s. If
- // another emitCopy is not our next move, also calculate nextHash
- // at s+1. At least on GOARCH=amd64, these three hash calculations
- // are faster as one load64 call (with some shifts) instead of
- // three load32 calls.
- x := load64(src, s-1)
- prevHash := hash(uint32(x >> 0))
- table[prevHash&tableMask] = uint16(s - 1)
- currHash := hash(uint32(x >> 8))
- candidate = int(table[currHash&tableMask])
- table[currHash&tableMask] = uint16(s)
- if s-candidate > maxMatchOffset || uint32(x>>8) != load32(src, candidate) {
- nextHash = hash(uint32(x >> 16))
- s++
- break
- }
- }
- }
-
-emitRemainder:
- if nextEmit < len(src) {
- emitLiteral(dst, src[nextEmit:])
- }
-}
-
-type tableEntry struct {
- val uint32
- offset int32
-}
-
-func load3232(b []byte, i int32) uint32 {
- b = b[i : i+4 : len(b)] // Help the compiler eliminate bounds checks on the next line.
- return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
-}
-
-func load6432(b []byte, i int32) uint64 {
- b = b[i : i+8 : len(b)] // Help the compiler eliminate bounds checks on the next line.
- return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 |
- uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
-}
-
-// snappyGen maintains the table for matches,
-// and the previous byte block for level 2.
-// This is the generic implementation.
-type snappyGen struct {
- prev []byte
- cur int32
-}
-
-// snappyL2 maintains the table for matches,
-// and the previous byte block for level 2.
-// This is the level 2 implementation.
-type snappyL2 struct {
- snappyGen
- table [tableSize]tableEntry
-}
-
-// Encode uses a similar algorithm to level 1, but is capable
-// of matching across blocks, giving better compression at a small slowdown.
-func (e *snappyL2) Encode(dst *tokens, src []byte) {
- const (
- inputMargin = 8 - 1
- minNonLiteralBlockSize = 1 + 1 + inputMargin
- )
-
- // Protect against e.cur wraparound.
- if e.cur > 1<<30 {
- for i := range e.table[:] {
- e.table[i] = tableEntry{}
- }
- e.cur = maxStoreBlockSize
- }
-
- // This check isn't in the Snappy implementation, but there, the caller
- // instead of the callee handles this case.
- if len(src) < minNonLiteralBlockSize {
- // We do not fill the token table.
- // This will be picked up by caller.
- dst.n = uint16(len(src))
- e.cur += maxStoreBlockSize
- e.prev = e.prev[:0]
- return
- }
-
- // sLimit is when to stop looking for offset/length copies. The inputMargin
- // lets us use a fast path for emitLiteral in the main loop, while we are
- // looking for copies.
- sLimit := int32(len(src) - inputMargin)
-
- // nextEmit is where in src the next emitLiteral should start from.
- nextEmit := int32(0)
- s := int32(0)
- cv := load3232(src, s)
- nextHash := hash(cv)
-
- for {
- // Copied from the C++ snappy implementation:
- //
- // Heuristic match skipping: If 32 bytes are scanned with no matches
- // found, start looking only at every other byte. If 32 more bytes are
- // scanned (or skipped), look at every third byte, etc.. When a match
- // is found, immediately go back to looking at every byte. This is a
- // small loss (~5% performance, ~0.1% density) for compressible data
- // due to more bookkeeping, but for non-compressible data (such as
- // JPEG) it's a huge win since the compressor quickly "realizes" the
- // data is incompressible and doesn't bother looking for matches
- // everywhere.
- //
- // The "skip" variable keeps track of how many bytes there are since
- // the last match; dividing it by 32 (ie. right-shifting by five) gives
- // the number of bytes to move ahead for each iteration.
- skip := int32(32)
-
- nextS := s
- var candidate tableEntry
- for {
- s = nextS
- bytesBetweenHashLookups := skip >> 5
- nextS = s + bytesBetweenHashLookups
- skip += bytesBetweenHashLookups
- if nextS > sLimit {
- goto emitRemainder
- }
- candidate = e.table[nextHash&tableMask]
- now := load3232(src, nextS)
- e.table[nextHash&tableMask] = tableEntry{offset: s + e.cur, val: cv}
- nextHash = hash(now)
-
- offset := s - (candidate.offset - e.cur)
- if offset > maxMatchOffset || cv != candidate.val {
- // Out of range or not matched.
- cv = now
- continue
- }
- break
- }
-
- // A 4-byte match has been found. We'll later see if more than 4 bytes
- // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
- // them as literal bytes.
- emitLiteral(dst, src[nextEmit:s])
-
- // Call emitCopy, and then see if another emitCopy could be our next
- // move. Repeat until we find no match for the input immediately after
- // what was consumed by the last emitCopy call.
- //
- // If we exit this loop normally then we need to call emitLiteral next,
- // though we don't yet know how big the literal will be. We handle that
- // by proceeding to the next iteration of the main loop. We also can
- // exit this loop via goto if we get close to exhausting the input.
- for {
- // Invariant: we have a 4-byte match at s, and no need to emit any
- // literal bytes prior to s.
-
- // Extend the 4-byte match as long as possible.
- //
- s += 4
- t := candidate.offset - e.cur + 4
- l := e.matchlen(s, t, src)
-
- // matchToken is flate's equivalent of Snappy's emitCopy. (length,offset)
- dst.tokens[dst.n] = matchToken(uint32(l+4-baseMatchLength), uint32(s-t-baseMatchOffset))
- dst.n++
- s += l
- nextEmit = s
- if s >= sLimit {
- t += l
- // Index first pair after match end.
- if int(t+4) < len(src) && t > 0 {
- cv := load3232(src, t)
- e.table[hash(cv)&tableMask] = tableEntry{offset: t + e.cur, val: cv}
- }
- goto emitRemainder
- }
-
- // We could immediately start working at s now, but to improve
- // compression we first update the hash table at s-1 and at s. If
- // another emitCopy is not our next move, also calculate nextHash
- // at s+1. At least on GOARCH=amd64, these three hash calculations
- // are faster as one load64 call (with some shifts) instead of
- // three load32 calls.
- x := load6432(src, s-1)
- prevHash := hash(uint32(x))
- e.table[prevHash&tableMask] = tableEntry{offset: e.cur + s - 1, val: uint32(x)}
- x >>= 8
- currHash := hash(uint32(x))
- candidate = e.table[currHash&tableMask]
- e.table[currHash&tableMask] = tableEntry{offset: e.cur + s, val: uint32(x)}
-
- offset := s - (candidate.offset - e.cur)
- if offset > maxMatchOffset || uint32(x) != candidate.val {
- cv = uint32(x >> 8)
- nextHash = hash(cv)
- s++
- break
- }
- }
- }
-
-emitRemainder:
- if int(nextEmit) < len(src) {
- emitLiteral(dst, src[nextEmit:])
- }
- e.cur += int32(len(src))
- e.prev = e.prev[:len(src)]
- copy(e.prev, src)
-}
-
-type tableEntryPrev struct {
- Cur tableEntry
- Prev tableEntry
-}
-
-// snappyL3 encapsulates level 3 compression.
-type snappyL3 struct {
- snappyGen
- table [tableSize]tableEntryPrev
-}
-
-// Encode uses a similar algorithm to level 2, but will check up to two candidates.
-func (e *snappyL3) Encode(dst *tokens, src []byte) {
- const (
- inputMargin = 8 - 1
- minNonLiteralBlockSize = 1 + 1 + inputMargin
- )
-
- // Protect against e.cur wraparound.
- if e.cur > 1<<30 {
- for i := range e.table[:] {
- e.table[i] = tableEntryPrev{}
- }
- e.snappyGen = snappyGen{cur: maxStoreBlockSize, prev: e.prev[:0]}
- }
-
- // This check isn't in the Snappy implementation, but there, the caller
- // instead of the callee handles this case.
- if len(src) < minNonLiteralBlockSize {
- // We do not fill the token table.
- // This will be picked up by caller.
- dst.n = uint16(len(src))
- e.cur += maxStoreBlockSize
- e.prev = e.prev[:0]
- return
- }
-
- // sLimit is when to stop looking for offset/length copies. The inputMargin
- // lets us use a fast path for emitLiteral in the main loop, while we are
- // looking for copies.
- sLimit := int32(len(src) - inputMargin)
-
- // nextEmit is where in src the next emitLiteral should start from.
- nextEmit := int32(0)
- s := int32(0)
- cv := load3232(src, s)
- nextHash := hash(cv)
-
- for {
- // Copied from the C++ snappy implementation:
- //
- // Heuristic match skipping: If 32 bytes are scanned with no matches
- // found, start looking only at every other byte. If 32 more bytes are
- // scanned (or skipped), look at every third byte, etc.. When a match
- // is found, immediately go back to looking at every byte. This is a
- // small loss (~5% performance, ~0.1% density) for compressible data
- // due to more bookkeeping, but for non-compressible data (such as
- // JPEG) it's a huge win since the compressor quickly "realizes" the
- // data is incompressible and doesn't bother looking for matches
- // everywhere.
- //
- // The "skip" variable keeps track of how many bytes there are since
- // the last match; dividing it by 32 (ie. right-shifting by five) gives
- // the number of bytes to move ahead for each iteration.
- skip := int32(32)
-
- nextS := s
- var candidate tableEntry
- for {
- s = nextS
- bytesBetweenHashLookups := skip >> 5
- nextS = s + bytesBetweenHashLookups
- skip += bytesBetweenHashLookups
- if nextS > sLimit {
- goto emitRemainder
- }
- candidates := e.table[nextHash&tableMask]
- now := load3232(src, nextS)
- e.table[nextHash&tableMask] = tableEntryPrev{Prev: candidates.Cur, Cur: tableEntry{offset: s + e.cur, val: cv}}
- nextHash = hash(now)
-
- // Check both candidates
- candidate = candidates.Cur
- if cv == candidate.val {
- offset := s - (candidate.offset - e.cur)
- if offset <= maxMatchOffset {
- break
- }
- } else {
- // We only check if value mismatches.
- // Offset will always be invalid in other cases.
- candidate = candidates.Prev
- if cv == candidate.val {
- offset := s - (candidate.offset - e.cur)
- if offset <= maxMatchOffset {
- break
- }
- }
- }
- cv = now
- }
-
- // A 4-byte match has been found. We'll later see if more than 4 bytes
- // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
- // them as literal bytes.
- emitLiteral(dst, src[nextEmit:s])
-
- // Call emitCopy, and then see if another emitCopy could be our next
- // move. Repeat until we find no match for the input immediately after
- // what was consumed by the last emitCopy call.
- //
- // If we exit this loop normally then we need to call emitLiteral next,
- // though we don't yet know how big the literal will be. We handle that
- // by proceeding to the next iteration of the main loop. We also can
- // exit this loop via goto if we get close to exhausting the input.
- for {
- // Invariant: we have a 4-byte match at s, and no need to emit any
- // literal bytes prior to s.
-
- // Extend the 4-byte match as long as possible.
- //
- s += 4
- t := candidate.offset - e.cur + 4
- l := e.matchlen(s, t, src)
-
- // matchToken is flate's equivalent of Snappy's emitCopy. (length,offset)
- dst.tokens[dst.n] = matchToken(uint32(l+4-baseMatchLength), uint32(s-t-baseMatchOffset))
- dst.n++
- s += l
- nextEmit = s
- if s >= sLimit {
- t += l
- // Index first pair after match end.
- if int(t+4) < len(src) && t > 0 {
- cv := load3232(src, t)
- nextHash = hash(cv)
- e.table[nextHash&tableMask] = tableEntryPrev{
- Prev: e.table[nextHash&tableMask].Cur,
- Cur: tableEntry{offset: e.cur + t, val: cv},
- }
- }
- goto emitRemainder
- }
-
- // We could immediately start working at s now, but to improve
- // compression we first update the hash table at s-3 to s. If
- // another emitCopy is not our next move, also calculate nextHash
- // at s+1. At least on GOARCH=amd64, these three hash calculations
- // are faster as one load64 call (with some shifts) instead of
- // three load32 calls.
- x := load6432(src, s-3)
- prevHash := hash(uint32(x))
- e.table[prevHash&tableMask] = tableEntryPrev{
- Prev: e.table[prevHash&tableMask].Cur,
- Cur: tableEntry{offset: e.cur + s - 3, val: uint32(x)},
- }
- x >>= 8
- prevHash = hash(uint32(x))
-
- e.table[prevHash&tableMask] = tableEntryPrev{
- Prev: e.table[prevHash&tableMask].Cur,
- Cur: tableEntry{offset: e.cur + s - 2, val: uint32(x)},
- }
- x >>= 8
- prevHash = hash(uint32(x))
-
- e.table[prevHash&tableMask] = tableEntryPrev{
- Prev: e.table[prevHash&tableMask].Cur,
- Cur: tableEntry{offset: e.cur + s - 1, val: uint32(x)},
- }
- x >>= 8
- currHash := hash(uint32(x))
- candidates := e.table[currHash&tableMask]
- cv = uint32(x)
- e.table[currHash&tableMask] = tableEntryPrev{
- Prev: candidates.Cur,
- Cur: tableEntry{offset: s + e.cur, val: cv},
- }
-
- // Check both candidates
- candidate = candidates.Cur
- if cv == candidate.val {
- offset := s - (candidate.offset - e.cur)
- if offset <= maxMatchOffset {
- continue
- }
- } else {
- // We only check if value mismatches.
- // Offset will always be invalid in other cases.
- candidate = candidates.Prev
- if cv == candidate.val {
- offset := s - (candidate.offset - e.cur)
- if offset <= maxMatchOffset {
- continue
- }
- }
- }
- cv = uint32(x >> 8)
- nextHash = hash(cv)
- s++
- break
- }
- }
-
-emitRemainder:
- if int(nextEmit) < len(src) {
- emitLiteral(dst, src[nextEmit:])
- }
- e.cur += int32(len(src))
- e.prev = e.prev[:len(src)]
- copy(e.prev, src)
-}
-
-// snappyL4
-type snappyL4 struct {
- snappyL3
-}
-
-// Encode uses a similar algorithm to level 3,
-// but will try an alternative candidate if the first match isn't long enough.
-func (e *snappyL4) Encode(dst *tokens, src []byte) {
- const (
- inputMargin = 8 - 3
- minNonLiteralBlockSize = 1 + 1 + inputMargin
- matchLenGood = 12
- )
-
- // Protect against e.cur wraparound.
- if e.cur > 1<<30 {
- for i := range e.table[:] {
- e.table[i] = tableEntryPrev{}
- }
- e.snappyGen = snappyGen{cur: maxStoreBlockSize, prev: e.prev[:0]}
- }
-
- // This check isn't in the Snappy implementation, but there, the caller
- // instead of the callee handles this case.
- if len(src) < minNonLiteralBlockSize {
- // We do not fill the token table.
- // This will be picked up by caller.
- dst.n = uint16(len(src))
- e.cur += maxStoreBlockSize
- e.prev = e.prev[:0]
- return
- }
-
- // sLimit is when to stop looking for offset/length copies. The inputMargin
- // lets us use a fast path for emitLiteral in the main loop, while we are
- // looking for copies.
- sLimit := int32(len(src) - inputMargin)
-
- // nextEmit is where in src the next emitLiteral should start from.
- nextEmit := int32(0)
- s := int32(0)
- cv := load3232(src, s)
- nextHash := hash(cv)
-
- for {
- // Copied from the C++ snappy implementation:
- //
- // Heuristic match skipping: If 32 bytes are scanned with no matches
- // found, start looking only at every other byte. If 32 more bytes are
- // scanned (or skipped), look at every third byte, etc.. When a match
- // is found, immediately go back to looking at every byte. This is a
- // small loss (~5% performance, ~0.1% density) for compressible data
- // due to more bookkeeping, but for non-compressible data (such as
- // JPEG) it's a huge win since the compressor quickly "realizes" the
- // data is incompressible and doesn't bother looking for matches
- // everywhere.
- //
- // The "skip" variable keeps track of how many bytes there are since
- // the last match; dividing it by 32 (ie. right-shifting by five) gives
- // the number of bytes to move ahead for each iteration.
- skip := int32(32)
-
- nextS := s
- var candidate tableEntry
- var candidateAlt tableEntry
- for {
- s = nextS
- bytesBetweenHashLookups := skip >> 5
- nextS = s + bytesBetweenHashLookups
- skip += bytesBetweenHashLookups
- if nextS > sLimit {
- goto emitRemainder
- }
- candidates := e.table[nextHash&tableMask]
- now := load3232(src, nextS)
- e.table[nextHash&tableMask] = tableEntryPrev{Prev: candidates.Cur, Cur: tableEntry{offset: s + e.cur, val: cv}}
- nextHash = hash(now)
-
- // Check both candidates
- candidate = candidates.Cur
- if cv == candidate.val {
- offset := s - (candidate.offset - e.cur)
- if offset < maxMatchOffset {
- offset = s - (candidates.Prev.offset - e.cur)
- if cv == candidates.Prev.val && offset < maxMatchOffset {
- candidateAlt = candidates.Prev
- }
- break
- }
- } else {
- // We only check if value mismatches.
- // Offset will always be invalid in other cases.
- candidate = candidates.Prev
- if cv == candidate.val {
- offset := s - (candidate.offset - e.cur)
- if offset < maxMatchOffset {
- break
- }
- }
- }
- cv = now
- }
-
- // A 4-byte match has been found. We'll later see if more than 4 bytes
- // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
- // them as literal bytes.
- emitLiteral(dst, src[nextEmit:s])
-
- // Call emitCopy, and then see if another emitCopy could be our next
- // move. Repeat until we find no match for the input immediately after
- // what was consumed by the last emitCopy call.
- //
- // If we exit this loop normally then we need to call emitLiteral next,
- // though we don't yet know how big the literal will be. We handle that
- // by proceeding to the next iteration of the main loop. We also can
- // exit this loop via goto if we get close to exhausting the input.
- for {
- // Invariant: we have a 4-byte match at s, and no need to emit any
- // literal bytes prior to s.
-
- // Extend the 4-byte match as long as possible.
- //
- s += 4
- t := candidate.offset - e.cur + 4
- l := e.matchlen(s, t, src)
- // Try alternative candidate if match length < matchLenGood.
- if l < matchLenGood-4 && candidateAlt.offset != 0 {
- t2 := candidateAlt.offset - e.cur + 4
- l2 := e.matchlen(s, t2, src)
- if l2 > l {
- l = l2
- t = t2
- }
- }
- // matchToken is flate's equivalent of Snappy's emitCopy. (length,offset)
- dst.tokens[dst.n] = matchToken(uint32(l+4-baseMatchLength), uint32(s-t-baseMatchOffset))
- dst.n++
- s += l
- nextEmit = s
- if s >= sLimit {
- t += l
- // Index first pair after match end.
- if int(t+4) < len(src) && t > 0 {
- cv := load3232(src, t)
- nextHash = hash(cv)
- e.table[nextHash&tableMask] = tableEntryPrev{
- Prev: e.table[nextHash&tableMask].Cur,
- Cur: tableEntry{offset: e.cur + t, val: cv},
- }
- }
- goto emitRemainder
- }
-
- // We could immediately start working at s now, but to improve
- // compression we first update the hash table at s-3 to s. If
- // another emitCopy is not our next move, also calculate nextHash
- // at s+1. At least on GOARCH=amd64, these three hash calculations
- // are faster as one load64 call (with some shifts) instead of
- // three load32 calls.
- x := load6432(src, s-3)
- prevHash := hash(uint32(x))
- e.table[prevHash&tableMask] = tableEntryPrev{
- Prev: e.table[prevHash&tableMask].Cur,
- Cur: tableEntry{offset: e.cur + s - 3, val: uint32(x)},
- }
- x >>= 8
- prevHash = hash(uint32(x))
-
- e.table[prevHash&tableMask] = tableEntryPrev{
- Prev: e.table[prevHash&tableMask].Cur,
- Cur: tableEntry{offset: e.cur + s - 2, val: uint32(x)},
- }
- x >>= 8
- prevHash = hash(uint32(x))
-
- e.table[prevHash&tableMask] = tableEntryPrev{
- Prev: e.table[prevHash&tableMask].Cur,
- Cur: tableEntry{offset: e.cur + s - 1, val: uint32(x)},
- }
- x >>= 8
- currHash := hash(uint32(x))
- candidates := e.table[currHash&tableMask]
- cv = uint32(x)
- e.table[currHash&tableMask] = tableEntryPrev{
- Prev: candidates.Cur,
- Cur: tableEntry{offset: s + e.cur, val: cv},
- }
-
- // Check both candidates
- candidate = candidates.Cur
- candidateAlt = tableEntry{}
- if cv == candidate.val {
- offset := s - (candidate.offset - e.cur)
- if offset <= maxMatchOffset {
- offset = s - (candidates.Prev.offset - e.cur)
- if cv == candidates.Prev.val && offset <= maxMatchOffset {
- candidateAlt = candidates.Prev
- }
- continue
- }
- } else {
- // We only check if value mismatches.
- // Offset will always be invalid in other cases.
- candidate = candidates.Prev
- if cv == candidate.val {
- offset := s - (candidate.offset - e.cur)
- if offset <= maxMatchOffset {
- continue
- }
- }
- }
- cv = uint32(x >> 8)
- nextHash = hash(cv)
- s++
- break
- }
- }
-
-emitRemainder:
- if int(nextEmit) < len(src) {
- emitLiteral(dst, src[nextEmit:])
- }
- e.cur += int32(len(src))
- e.prev = e.prev[:len(src)]
- copy(e.prev, src)
-}
-
-func (e *snappyGen) matchlen(s, t int32, src []byte) int32 {
- s1 := int(s) + maxMatchLength - 4
- if s1 > len(src) {
- s1 = len(src)
- }
-
- // If we are inside the current block
- if t >= 0 {
- b := src[t:]
- a := src[s:s1]
- b = b[:len(a)]
- // Extend the match to be as long as possible.
- for i := range a {
- if a[i] != b[i] {
- return int32(i)
- }
- }
- return int32(len(a))
- }
-
- // We found a match in the previous block.
- tp := int32(len(e.prev)) + t
- if tp < 0 {
- return 0
- }
-
- // Extend the match to be as long as possible.
- a := src[s:s1]
- b := e.prev[tp:]
- if len(b) > len(a) {
- b = b[:len(a)]
- }
- a = a[:len(b)]
- for i := range b {
- if a[i] != b[i] {
- return int32(i)
- }
- }
-
- // If we reached our limit, we matched everything we are
- // allowed to in the previous block and we return.
- n := int32(len(b))
- if int(s+n) == s1 {
- return n
- }
-
- // Continue looking for more matches in the current block.
- a = src[s+n : s1]
- b = src[:len(a)]
- for i := range a {
- if a[i] != b[i] {
- return int32(i) + n
- }
- }
- return int32(len(a)) + n
-}
-
-// Reset the encoding table.
-func (e *snappyGen) Reset() {
- e.prev = e.prev[:0]
- e.cur += maxMatchOffset
-}
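
All of the deleted encoders above share the same match-finder core: hash the next four bytes with a multiplicative hash and use the top bits as a table index. A standalone sketch of that step, with the constant and shift taken from the deleted file and the rest illustrative:

package main

import (
	"encoding/binary"
	"fmt"
)

const tableBits = 14 // as in the deleted file

// hash4 is the multiplicative hash the deleted encoders used: multiply
// the next four bytes (read little-endian, like load32) by an odd
// constant and keep the top tableBits bits as the table index.
func hash4(b []byte) uint32 {
	u := binary.LittleEndian.Uint32(b)
	return (u * 0x1e35a7bd) >> (32 - tableBits)
}

func main() {
	data := []byte("abcdabcd")
	// Equal 4-byte windows hash to the same slot, which is how the
	// encoder finds the earlier copy of "abcd" and emits a match.
	fmt.Println(hash4(data[0:4]) == hash4(data[4:8])) // true
}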
diff --git a/vendor/github.com/klauspost/compress/flate/stateless.go b/vendor/github.com/klauspost/compress/flate/stateless.go
new file mode 100644
index 000000000..a47051197
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/stateless.go
@@ -0,0 +1,266 @@
+package flate
+
+import (
+ "io"
+ "math"
+ "sync"
+)
+
+const (
+ maxStatelessBlock = math.MaxInt16
+
+ slTableBits = 13
+ slTableSize = 1 << slTableBits
+ slTableShift = 32 - slTableBits
+)
+
+type statelessWriter struct {
+ dst io.Writer
+ closed bool
+}
+
+func (s *statelessWriter) Close() error {
+ if s.closed {
+ return nil
+ }
+ s.closed = true
+ // Emit EOF block
+ return StatelessDeflate(s.dst, nil, true)
+}
+
+func (s *statelessWriter) Write(p []byte) (n int, err error) {
+ err = StatelessDeflate(s.dst, p, false)
+ if err != nil {
+ return 0, err
+ }
+ return len(p), nil
+}
+
+func (s *statelessWriter) Reset(w io.Writer) {
+ s.dst = w
+ s.closed = false
+}
+
+// NewStatelessWriter returns a writer that compresses data without
+// maintaining any state between Write calls.
+// Because no memory is kept between Write calls,
+// compression ratio and speed will be suboptimal, and the size of
+// each Write call will directly affect the output size.
+func NewStatelessWriter(dst io.Writer) io.WriteCloser {
+ return &statelessWriter{dst: dst}
+}
+
+// bitWriterPool contains bit writers that can be reused.
+var bitWriterPool = sync.Pool{
+ New: func() interface{} {
+ return newHuffmanBitWriter(nil)
+ },
+}
+
+// StatelessDeflate allows compressing directly to a Writer without retaining state.
+// When the function returns, everything has been flushed.
+func StatelessDeflate(out io.Writer, in []byte, eof bool) error {
+ var dst tokens
+ bw := bitWriterPool.Get().(*huffmanBitWriter)
+ bw.reset(out)
+ defer func() {
+ // don't keep a reference to our output
+ bw.reset(nil)
+ bitWriterPool.Put(bw)
+ }()
+ if eof && len(in) == 0 {
+ // Just write an EOF block.
+ // Could be faster...
+ bw.writeStoredHeader(0, true)
+ bw.flush()
+ return bw.err
+ }
+
+ for len(in) > 0 {
+ todo := in
+ if len(todo) > maxStatelessBlock {
+ todo = todo[:maxStatelessBlock]
+ }
+ in = in[len(todo):]
+ // Compress
+ statelessEnc(&dst, todo)
+ isEof := eof && len(in) == 0
+
+ if dst.n == 0 {
+ bw.writeStoredHeader(len(todo), isEof)
+ if bw.err != nil {
+ return bw.err
+ }
+ bw.writeBytes(todo)
+ } else if int(dst.n) > len(todo)-len(todo)>>4 {
+ // If we removed less than 1/16th, huffman compress the block.
+ bw.writeBlockHuff(isEof, todo, false)
+ } else {
+ bw.writeBlockDynamic(&dst, isEof, todo, false)
+ }
+ if bw.err != nil {
+ return bw.err
+ }
+ dst.Reset()
+ }
+ if !eof {
+ // Align.
+ bw.writeStoredHeader(0, false)
+ }
+ bw.flush()
+ return bw.err
+}
+
+func hashSL(u uint32) uint32 {
+ return (u * 0x1e35a7bd) >> slTableShift
+}
+
+func load3216(b []byte, i int16) uint32 {
+ // Help the compiler eliminate bounds checks on the read so it can be done in a single read.
+ b = b[i:]
+ b = b[:4]
+ return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
+}
+
+func load6416(b []byte, i int16) uint64 {
+ // Help the compiler eliminate bounds checks on the read so it can be done in a single read.
+ b = b[i:]
+ b = b[:8]
+ return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 |
+ uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
+}
+
+func statelessEnc(dst *tokens, src []byte) {
+ const (
+ inputMargin = 12 - 1
+ minNonLiteralBlockSize = 1 + 1 + inputMargin
+ )
+
+ type tableEntry struct {
+ offset int16
+ }
+
+ var table [slTableSize]tableEntry
+
+ // This check isn't in the Snappy implementation, but there, the caller
+ // instead of the callee handles this case.
+ if len(src) < minNonLiteralBlockSize {
+ // We do not fill the token table.
+ // This will be picked up by caller.
+ dst.n = uint16(len(src))
+ return
+ }
+
+ s := int16(1)
+ nextEmit := int16(0)
+ // sLimit is when to stop looking for offset/length copies. The inputMargin
+ // lets us use a fast path for emitLiteral in the main loop, while we are
+ // looking for copies.
+ sLimit := int16(len(src) - inputMargin)
+
+ // nextEmit is where in src the next emitLiteral should start from.
+ cv := load3216(src, s)
+
+ for {
+ const skipLog = 5
+ const doEvery = 2
+
+ nextS := s
+ var candidate tableEntry
+ for {
+ nextHash := hashSL(cv)
+ candidate = table[nextHash]
+ nextS = s + doEvery + (s-nextEmit)>>skipLog
+ if nextS > sLimit || nextS <= 0 {
+ goto emitRemainder
+ }
+
+ now := load6416(src, nextS)
+ table[nextHash] = tableEntry{offset: s}
+ nextHash = hashSL(uint32(now))
+
+ if cv == load3216(src, candidate.offset) {
+ table[nextHash] = tableEntry{offset: nextS}
+ break
+ }
+
+ // Do one right away...
+ cv = uint32(now)
+ s = nextS
+ nextS++
+ candidate = table[nextHash]
+ now >>= 8
+ table[nextHash] = tableEntry{offset: s}
+
+ if cv == load3216(src, candidate.offset) {
+ table[nextHash] = tableEntry{offset: nextS}
+ break
+ }
+ cv = uint32(now)
+ s = nextS
+ }
+
+ // A 4-byte match has been found. We'll later see if more than 4 bytes
+ // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
+ // them as literal bytes.
+ for {
+ // Invariant: we have a 4-byte match at s, and no need to emit any
+ // literal bytes prior to s.
+
+ // Extend the 4-byte match as long as possible.
+ t := candidate.offset
+ l := int16(matchLen(src[s+4:], src[t+4:]) + 4)
+
+ // Extend backwards
+ for t > 0 && s > nextEmit && src[t-1] == src[s-1] {
+ s--
+ t--
+ l++
+ }
+ if nextEmit < s {
+ emitLiteral(dst, src[nextEmit:s])
+ }
+
+ // Save the match found
+ dst.AddMatchLong(int32(l), uint32(s-t-baseMatchOffset))
+ s += l
+ nextEmit = s
+ if nextS >= s {
+ s = nextS + 1
+ }
+ if s >= sLimit {
+ goto emitRemainder
+ }
+
+ // We could immediately start working at s now, but to improve
+ // compression we first update the hash table at s-2 and at s. If
+ // another emitCopy is not our next move, also calculate nextHash
+ // at s+1. At least on GOARCH=amd64, these three hash calculations
+ // are faster as one load64 call (with some shifts) instead of
+ // three load32 calls.
+ x := load6416(src, s-2)
+ o := s - 2
+ prevHash := hashSL(uint32(x))
+ table[prevHash] = tableEntry{offset: o}
+ x >>= 16
+ currHash := hashSL(uint32(x))
+ candidate = table[currHash]
+ table[currHash] = tableEntry{offset: o + 2}
+
+ if uint32(x) != load3216(src, candidate.offset) {
+ cv = uint32(x >> 8)
+ s++
+ break
+ }
+ }
+ }
+
+emitRemainder:
+ if int(nextEmit) < len(src) {
+ // If nothing was added, don't encode literals.
+ if dst.n == 0 {
+ return
+ }
+ emitLiteral(dst, src[nextEmit:])
+ }
+}
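
A hedged usage sketch of the new API added by this file: each Write is compressed as an independent block, and the resulting stream stays readable by any DEFLATE decoder (the standard library's flate is used here only for verification):

package main

import (
	"bytes"
	stdflate "compress/flate"
	"fmt"
	"io/ioutil"

	"github.com/klauspost/compress/flate"
)

func main() {
	var buf bytes.Buffer
	w := flate.NewStatelessWriter(&buf)
	w.Write([]byte("hello, ")) // compressed immediately, no state kept
	w.Write([]byte("stateless world"))
	w.Close() // emits the final EOF block

	r := stdflate.NewReader(&buf)
	defer r.Close()
	out, err := ioutil.ReadAll(r)
	fmt.Println(string(out), err) // hello, stateless world <nil>
}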
diff --git a/vendor/github.com/klauspost/compress/flate/token.go b/vendor/github.com/klauspost/compress/flate/token.go
index 141299b97..b3df0d894 100644
--- a/vendor/github.com/klauspost/compress/flate/token.go
+++ b/vendor/github.com/klauspost/compress/flate/token.go
@@ -4,6 +4,14 @@
package flate
+import (
+ "bytes"
+ "encoding/binary"
+ "fmt"
+ "io"
+ "math"
+)
+
const (
// 2 bits: type 0 = literal 1=EOF 2=Match 3=Unused
// 8 bits: xlength = length - MIN_MATCH_LENGTH
@@ -46,6 +54,36 @@ var lengthCodes = [256]uint8{
27, 27, 27, 27, 27, 28,
}
+// lengthCodes1 is lengthCodes with every code incremented by 1, so the codes start at 1.
+var lengthCodes1 = [256]uint8{
+ 1, 2, 3, 4, 5, 6, 7, 8, 9, 9,
+ 10, 10, 11, 11, 12, 12, 13, 13, 13, 13,
+ 14, 14, 14, 14, 15, 15, 15, 15, 16, 16,
+ 16, 16, 17, 17, 17, 17, 17, 17, 17, 17,
+ 18, 18, 18, 18, 18, 18, 18, 18, 19, 19,
+ 19, 19, 19, 19, 19, 19, 20, 20, 20, 20,
+ 20, 20, 20, 20, 21, 21, 21, 21, 21, 21,
+ 21, 21, 21, 21, 21, 21, 21, 21, 21, 21,
+ 22, 22, 22, 22, 22, 22, 22, 22, 22, 22,
+ 22, 22, 22, 22, 22, 22, 23, 23, 23, 23,
+ 23, 23, 23, 23, 23, 23, 23, 23, 23, 23,
+ 23, 23, 24, 24, 24, 24, 24, 24, 24, 24,
+ 24, 24, 24, 24, 24, 24, 24, 24, 25, 25,
+ 25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
+ 25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
+ 25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
+ 26, 26, 26, 26, 26, 26, 26, 26, 26, 26,
+ 26, 26, 26, 26, 26, 26, 26, 26, 26, 26,
+ 26, 26, 26, 26, 26, 26, 26, 26, 26, 26,
+ 26, 26, 27, 27, 27, 27, 27, 27, 27, 27,
+ 27, 27, 27, 27, 27, 27, 27, 27, 27, 27,
+ 27, 27, 27, 27, 27, 27, 27, 27, 27, 27,
+ 27, 27, 27, 27, 28, 28, 28, 28, 28, 28,
+ 28, 28, 28, 28, 28, 28, 28, 28, 28, 28,
+ 28, 28, 28, 28, 28, 28, 28, 28, 28, 28,
+ 28, 28, 28, 28, 28, 29,
+}
+
var offsetCodes = [256]uint32{
0, 1, 2, 3, 4, 4, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7,
8, 8, 8, 8, 8, 8, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9,
@@ -65,19 +103,236 @@ var offsetCodes = [256]uint32{
15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15,
}
+// offsetCodes14 are offsetCodes, but with 14 added.
+var offsetCodes14 = [256]uint32{
+ 14, 15, 16, 17, 18, 18, 19, 19, 20, 20, 20, 20, 21, 21, 21, 21,
+ 22, 22, 22, 22, 22, 22, 22, 22, 23, 23, 23, 23, 23, 23, 23, 23,
+ 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
+ 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
+ 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26,
+ 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26,
+ 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27,
+ 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27,
+ 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28,
+ 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28,
+ 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28,
+ 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28,
+ 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29,
+ 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29,
+ 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29,
+ 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29,
+}
+
type token uint32
type tokens struct {
- tokens [maxStoreBlockSize + 1]token
- n uint16 // Must be able to contain maxStoreBlockSize
+ nLits int
+ extraHist [32]uint16 // codes 256->maxnumlit
+ offHist [32]uint16 // offset codes
+ litHist [256]uint16 // codes 0->255
+ n uint16 // Must be able to contain maxStoreBlockSize
+ tokens [maxStoreBlockSize + 1]token
+}
+
+func (t *tokens) Reset() {
+ if t.n == 0 {
+ return
+ }
+ t.n = 0
+ t.nLits = 0
+ for i := range t.litHist[:] {
+ t.litHist[i] = 0
+ }
+ for i := range t.extraHist[:] {
+ t.extraHist[i] = 0
+ }
+ for i := range t.offHist[:] {
+ t.offHist[i] = 0
+ }
+}
+
+func (t *tokens) Fill() {
+ if t.n == 0 {
+ return
+ }
+ for i, v := range t.litHist[:] {
+ if v == 0 {
+ t.litHist[i] = 1
+ t.nLits++
+ }
+ }
+ for i, v := range t.extraHist[:literalCount-256] {
+ if v == 0 {
+ t.nLits++
+ t.extraHist[i] = 1
+ }
+ }
+ for i, v := range t.offHist[:offsetCodeCount] {
+ if v == 0 {
+ t.offHist[i] = 1
+ }
+ }
+}
+
+func indexTokens(in []token) tokens {
+ var t tokens
+ t.indexTokens(in)
+ return t
+}
+
+func (t *tokens) indexTokens(in []token) {
+ t.Reset()
+ for _, tok := range in {
+ if tok < matchType {
+ t.tokens[t.n] = tok
+ t.litHist[tok]++
+ t.n++
+ continue
+ }
+ t.AddMatch(uint32(tok.length()), tok.offset())
+ }
}
-// Convert a literal into a literal token.
-func literalToken(literal uint32) token { return token(literalType + literal) }
+// emitLiteral writes a literal chunk to the token buffer and updates the literal histogram.
+func emitLiteral(dst *tokens, lit []byte) {
+ ol := int(dst.n)
+ for i, v := range lit {
+ dst.tokens[(i+ol)&maxStoreBlockSize] = token(v)
+ dst.litHist[v]++
+ }
+ dst.n += uint16(len(lit))
+ dst.nLits += len(lit)
+}
-// Convert a < xlength, xoffset > pair into a match token.
-func matchToken(xlength uint32, xoffset uint32) token {
- return token(matchType + xlength<<lengthShift + xoffset)
+func (t *tokens) AddLiteral(lit byte) {
+ t.tokens[t.n] = token(lit)
+ t.litHist[lit]++
+ t.n++
+ t.nLits++
+}
+
+// EstimatedBits will return a minimum size, in bits, estimated by an
+// *optimal* compression of the block, based on the histograms of
+// literals, lengths and offsets collected so far.
+func (t *tokens) EstimatedBits() int {
+ shannon := float64(0)
+ bits := int(0)
+ nMatches := 0
+ if t.nLits > 0 {
+ invTotal := 1.0 / float64(t.nLits)
+ for _, v := range t.litHist[:] {
+ if v > 0 {
+ n := float64(v)
+ shannon += math.Ceil(-math.Log2(n*invTotal) * n)
+ }
+ }
+ // Just add 15 for EOB
+ shannon += 15
+ for _, v := range t.extraHist[1 : literalCount-256] {
+ if v > 0 {
+ n := float64(v)
+ shannon += math.Ceil(-math.Log2(n*invTotal) * n)
+ bits += int(lengthExtraBits[v&31]) * int(v)
+ nMatches += int(v)
+ }
+ }
+ }
+ if nMatches > 0 {
+ invTotal := 1.0 / float64(nMatches)
+ for _, v := range t.offHist[:offsetCodeCount] {
+ if v > 0 {
+ n := float64(v)
+ shannon += math.Ceil(-math.Log2(n*invTotal) * n)
+ bits += int(offsetExtraBits[v&31]) * int(n)
+ }
+ }
+ }
+
+ return int(shannon) + bits
+}
+
+// AddMatch adds a match to the tokens.
+// This function is very sensitive to inlining and right on the border.
+func (t *tokens) AddMatch(xlength uint32, xoffset uint32) {
+ if debugDecode {
+ if xlength >= maxMatchLength+baseMatchLength {
+ panic(fmt.Errorf("invalid length: %v", xlength))
+ }
+ if xoffset >= maxMatchOffset+baseMatchOffset {
+ panic(fmt.Errorf("invalid offset: %v", xoffset))
+ }
+ }
+ t.nLits++
+ lengthCode := lengthCodes1[uint8(xlength)] & 31
+ t.tokens[t.n] = token(matchType | xlength<<lengthShift | xoffset)
+ t.extraHist[lengthCode]++
+ t.offHist[offsetCode(xoffset)&31]++
+ t.n++
+}
+
+// AddMatchLong adds a match to the tokens, potentially longer than max match length.
+// Length should NOT have the base subtracted, only offset should.
+func (t *tokens) AddMatchLong(xlength int32, xoffset uint32) {
+ if debugDecode {
+ if xoffset >= maxMatchOffset+baseMatchOffset {
+ panic(fmt.Errorf("invalid offset: %v", xoffset))
+ }
+ }
+ oc := offsetCode(xoffset) & 31
+ for xlength > 0 {
+ xl := xlength
+ if xl > 258 {
+ // We need to have at least baseMatchLength left over for next loop.
+ xl = 258 - baseMatchLength
+ }
+ xlength -= xl
+ xl -= 3
+ t.nLits++
+ lengthCode := lengthCodes1[uint8(xl)] & 31
+ t.tokens[t.n] = token(matchType | uint32(xl)<<lengthShift | xoffset)
+ t.extraHist[lengthCode]++
+ t.offHist[oc]++
+ t.n++
+ }
+}
+
+func (t *tokens) AddEOB() {
+ t.tokens[t.n] = token(endBlockMarker)
+ t.extraHist[0]++
+ t.n++
+}
+
+func (t *tokens) Slice() []token {
+ return t.tokens[:t.n]
+}
+
+// VarInt returns the tokens as varint encoded bytes.
+func (t *tokens) VarInt() []byte {
+ var b = make([]byte, binary.MaxVarintLen32*int(t.n))
+ var off int
+ for _, v := range t.tokens[:t.n] {
+ off += binary.PutUvarint(b[off:], uint64(v))
+ }
+ return b[:off]
+}
+
+// FromVarInt restores t to the varint encoded tokens provided.
+// Any data in t is removed.
+func (t *tokens) FromVarInt(b []byte) error {
+ var buf = bytes.NewReader(b)
+ var toks []token
+ for {
+ r, err := binary.ReadUvarint(buf)
+ if err == io.EOF {
+ break
+ }
+ if err != nil {
+ return err
+ }
+ toks = append(toks, token(r))
+ }
+ t.indexTokens(toks)
+ return nil
}
// Returns the type of a token
@@ -96,11 +351,17 @@ func lengthCode(len uint8) uint32 { return uint32(lengthCodes[len]) }
// Returns the offset code corresponding to a specific offset
func offsetCode(off uint32) uint32 {
+ if false {
+ if off < uint32(len(offsetCodes)) {
+ return offsetCodes[off&255]
+ } else if off>>7 < uint32(len(offsetCodes)) {
+ return offsetCodes[(off>>7)&255] + 14
+ } else {
+ return offsetCodes[(off>>14)&255] + 28
+ }
+ }
if off < uint32(len(offsetCodes)) {
- return offsetCodes[off&255]
- } else if off>>7 < uint32(len(offsetCodes)) {
- return offsetCodes[(off>>7)&255] + 14
- } else {
- return offsetCodes[(off>>14)&255] + 28
+ return offsetCodes[uint8(off)]
}
+ return offsetCodes14[uint8(off>>7)]
}
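
The EstimatedBits method above prices each symbol at about -log2(count/total) bits, the Shannon lower bound, to decide how a block should be encoded. A self-contained sketch of that estimate, simplified to literals only (no length/offset extra bits; names are illustrative):

package main

import (
	"fmt"
	"math"
)

// estimateBits mirrors the Shannon-entropy sum in tokens.EstimatedBits:
// a symbol seen n times out of total costs about -log2(n/total) bits
// per occurrence under an optimal prefix code.
func estimateBits(hist []int) int {
	total := 0
	for _, v := range hist {
		total += v
	}
	if total == 0 {
		return 0
	}
	inv := 1.0 / float64(total)
	bits := 0.0
	for _, v := range hist {
		if v > 0 {
			n := float64(v)
			bits += math.Ceil(-math.Log2(n*inv) * n)
		}
	}
	return int(bits)
}

func main() {
	// 900 of one symbol, 100 of another: about 0.47 bits/symbol,
	// so roughly 470 bits for the whole block.
	fmt.Println(estimateBits([]int{900, 100}))
}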
diff --git a/vendor/github.com/klauspost/compress/huff0/compress.go b/vendor/github.com/klauspost/compress/huff0/compress.go
index dd4f7fefb..51e00aaeb 100644
--- a/vendor/github.com/klauspost/compress/huff0/compress.go
+++ b/vendor/github.com/klauspost/compress/huff0/compress.go
@@ -54,6 +54,12 @@ func compress(in []byte, s *Scratch, compressor func(src []byte) ([]byte, error)
canReuse = s.canUseTable(s.prevTable)
}
+ // We want the output size to be less than this:
+ wantSize := len(in)
+ if s.WantLogLess > 0 {
+ wantSize -= wantSize >> s.WantLogLess
+ }
+
// Reset for next run.
s.clearCount = true
s.maxCount = 0
@@ -77,7 +83,7 @@ func compress(in []byte, s *Scratch, compressor func(src []byte) ([]byte, error)
s.cTable = s.prevTable
s.Out, err = compressor(in)
s.cTable = keepTable
- if err == nil && len(s.Out) < len(in) {
+ if err == nil && len(s.Out) < wantSize {
s.OutData = s.Out
return s.Out, true, nil
}
@@ -100,13 +106,16 @@ func compress(in []byte, s *Scratch, compressor func(src []byte) ([]byte, error)
hSize := len(s.Out)
oldSize := s.prevTable.estimateSize(s.count[:s.symbolLen])
newSize := s.cTable.estimateSize(s.count[:s.symbolLen])
- if oldSize <= hSize+newSize || hSize+12 >= len(in) {
+ if oldSize <= hSize+newSize || hSize+12 >= wantSize {
// Retain cTable even if we re-use.
keepTable := s.cTable
s.cTable = s.prevTable
s.Out, err = compressor(in)
s.cTable = keepTable
- if len(s.Out) >= len(in) {
+ if err != nil {
+ return nil, false, err
+ }
+ if len(s.Out) >= wantSize {
return nil, false, ErrIncompressible
}
s.OutData = s.Out
@@ -128,7 +137,7 @@ func compress(in []byte, s *Scratch, compressor func(src []byte) ([]byte, error)
s.OutTable = nil
return nil, false, err
}
- if len(s.Out) >= len(in) {
+ if len(s.Out) >= wantSize {
s.OutTable = nil
return nil, false, ErrIncompressible
}
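
The new wantSize gate replaces the old "any output smaller than input" check: with WantLogLess set, the output must undercut the input by at least input >> WantLogLess. A quick worked example of the arithmetic:

package main

import "fmt"

func main() {
	// wantSize as computed in compress() above, for a 4096-byte input
	// and WantLogLess = 4 (the value zstd's block encoder now uses).
	in := 4096
	wantLogLess := uint8(4)
	wantSize := in - in>>wantLogLess
	fmt.Println(wantSize) // 3840: any output of 3840+ bytes is rejected as incompressible
}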
diff --git a/vendor/github.com/klauspost/compress/huff0/decompress.go b/vendor/github.com/klauspost/compress/huff0/decompress.go
index 43b4815b3..97ae66a4a 100644
--- a/vendor/github.com/klauspost/compress/huff0/decompress.go
+++ b/vendor/github.com/klauspost/compress/huff0/decompress.go
@@ -15,8 +15,7 @@ type dTable struct {
// single-symbols decoding
type dEntrySingle struct {
- byte uint8
- nBits uint8
+ entry uint16
}
// double-symbols decoding
@@ -76,14 +75,15 @@ func ReadTable(in []byte, s *Scratch) (s2 *Scratch, remain []byte, err error) {
}
// collect weight stats
- var rankStats [tableLogMax + 1]uint32
+ var rankStats [16]uint32
weightTotal := uint32(0)
for _, v := range s.huffWeight[:s.symbolLen] {
if v > tableLogMax {
return s, nil, errors.New("corrupt input: weight too large")
}
- rankStats[v]++
- weightTotal += (1 << (v & 15)) >> 1
+ v2 := v & 15
+ rankStats[v2]++
+ weightTotal += (1 << v2) >> 1
}
if weightTotal == 0 {
return s, nil, errors.New("corrupt input: weights zero")
@@ -134,15 +134,17 @@ func ReadTable(in []byte, s *Scratch) (s2 *Scratch, remain []byte, err error) {
if len(s.dt.single) != tSize {
s.dt.single = make([]dEntrySingle, tSize)
}
-
for n, w := range s.huffWeight[:s.symbolLen] {
+ if w == 0 {
+ continue
+ }
length := (uint32(1) << w) >> 1
d := dEntrySingle{
- byte: uint8(n),
- nBits: s.actualTableLog + 1 - w,
+ entry: uint16(s.actualTableLog+1-w) | (uint16(n) << 8),
}
- for u := rankStats[w]; u < rankStats[w]+length; u++ {
- s.dt.single[u] = d
+ single := s.dt.single[rankStats[w] : rankStats[w]+length]
+ for i := range single {
+ single[i] = d
}
rankStats[w] += length
}
@@ -167,12 +169,12 @@ func (s *Scratch) Decompress1X(in []byte) (out []byte, err error) {
decode := func() byte {
val := br.peekBitsFast(s.actualTableLog) /* note : actualTableLog >= 1 */
v := s.dt.single[val]
- br.bitsRead += v.nBits
- return v.byte
+ br.bitsRead += uint8(v.entry)
+ return uint8(v.entry >> 8)
}
hasDec := func(v dEntrySingle) byte {
- br.bitsRead += v.nBits
- return v.byte
+ br.bitsRead += uint8(v.entry)
+ return uint8(v.entry >> 8)
}
// Avoid bounds check by always having full sized table.
@@ -269,33 +271,81 @@ func (s *Scratch) Decompress4X(in []byte, dstSize int) (out []byte, err error) {
decode := func(br *bitReader) byte {
val := br.peekBitsFast(s.actualTableLog) /* note : actualTableLog >= 1 */
v := single[val&tlMask]
- br.bitsRead += v.nBits
- return v.byte
+ br.bitsRead += uint8(v.entry)
+ return uint8(v.entry >> 8)
}
// Use temp table to avoid bound checks/append penalty.
var tmp = s.huffWeight[:256]
var off uint8
+ var decoded int
// Decode 2 values from each decoder/loop.
const bufoff = 256 / 4
bigloop:
for {
for i := range br {
- if br[i].off < 4 {
+ br := &br[i]
+ if br.off < 4 {
break bigloop
}
- br[i].fillFast()
+ br.fillFast()
+ }
+
+ {
+ const stream = 0
+ val := br[stream].peekBitsFast(s.actualTableLog)
+ v := single[val&tlMask]
+ br[stream].bitsRead += uint8(v.entry)
+
+ val2 := br[stream].peekBitsFast(s.actualTableLog)
+ v2 := single[val2&tlMask]
+ tmp[off+bufoff*stream+1] = uint8(v2.entry >> 8)
+ tmp[off+bufoff*stream] = uint8(v.entry >> 8)
+ br[stream].bitsRead += uint8(v2.entry)
+ }
+
+ {
+ const stream = 1
+ val := br[stream].peekBitsFast(s.actualTableLog)
+ v := single[val&tlMask]
+ br[stream].bitsRead += uint8(v.entry)
+
+ val2 := br[stream].peekBitsFast(s.actualTableLog)
+ v2 := single[val2&tlMask]
+ tmp[off+bufoff*stream+1] = uint8(v2.entry >> 8)
+ tmp[off+bufoff*stream] = uint8(v.entry >> 8)
+ br[stream].bitsRead += uint8(v2.entry)
}
- tmp[off] = decode(&br[0])
- tmp[off+bufoff] = decode(&br[1])
- tmp[off+bufoff*2] = decode(&br[2])
- tmp[off+bufoff*3] = decode(&br[3])
- tmp[off+1] = decode(&br[0])
- tmp[off+1+bufoff] = decode(&br[1])
- tmp[off+1+bufoff*2] = decode(&br[2])
- tmp[off+1+bufoff*3] = decode(&br[3])
+
+ {
+ const stream = 2
+ val := br[stream].peekBitsFast(s.actualTableLog)
+ v := single[val&tlMask]
+ br[stream].bitsRead += uint8(v.entry)
+
+ val2 := br[stream].peekBitsFast(s.actualTableLog)
+ v2 := single[val2&tlMask]
+ tmp[off+bufoff*stream+1] = uint8(v2.entry >> 8)
+ tmp[off+bufoff*stream] = uint8(v.entry >> 8)
+ br[stream].bitsRead += uint8(v2.entry)
+ }
+
+ {
+ const stream = 3
+ val := br[stream].peekBitsFast(s.actualTableLog)
+ v := single[val&tlMask]
+ br[stream].bitsRead += uint8(v.entry)
+
+ val2 := br[stream].peekBitsFast(s.actualTableLog)
+ v2 := single[val2&tlMask]
+ tmp[off+bufoff*stream+1] = uint8(v2.entry >> 8)
+ tmp[off+bufoff*stream] = uint8(v.entry >> 8)
+ br[stream].bitsRead += uint8(v2.entry)
+ }
+
off += 2
+
if off == bufoff {
if bufoff > dstEvery {
return nil, errors.New("corruption detected: stream overrun 1")
@@ -306,6 +356,7 @@ bigloop:
copy(dstOut[dstEvery*3:], tmp[bufoff*3:bufoff*4])
off = 0
dstOut = dstOut[bufoff:]
+ decoded += 256
// There must at least be 3 buffers left.
if len(dstOut) < dstEvery*3 {
return nil, errors.New("corruption detected: stream overrun 2")
@@ -321,9 +372,11 @@ bigloop:
copy(dstOut[dstEvery:dstEvery+ioff], tmp[bufoff:bufoff*2])
copy(dstOut[dstEvery*2:dstEvery*2+ioff], tmp[bufoff*2:bufoff*3])
copy(dstOut[dstEvery*3:dstEvery*3+ioff], tmp[bufoff*3:bufoff*4])
+ decoded += int(off) * 4
dstOut = dstOut[off:]
}
+ // Decode remaining.
for i := range br {
offset := dstEvery * i
br := &br[i]
@@ -335,12 +388,15 @@ bigloop:
dstOut[offset] = decode(br)
offset++
}
+ decoded += offset - dstEvery*i
err = br.close()
if err != nil {
return nil, err
}
}
-
+ if dstSize != decoded {
+ return nil, errors.New("corruption detected: short output block")
+ }
return s.Out, nil
}
@@ -360,7 +416,7 @@ func (s *Scratch) matches(ct cTable, w io.Writer) {
broken++
if enc.nBits == 0 {
for _, dec := range dt {
- if dec.byte == byte(sym) {
+ if uint8(dec.entry>>8) == byte(sym) {
fmt.Fprintf(w, "symbol %x has decoder, but no encoder\n", sym)
errs++
break
@@ -376,12 +432,12 @@ func (s *Scratch) matches(ct cTable, w io.Writer) {
top := enc.val << ub
// decoder looks at top bits.
dec := dt[top]
- if dec.nBits != enc.nBits {
- fmt.Fprintf(w, "symbol 0x%x bit size mismatch (enc: %d, dec:%d).\n", sym, enc.nBits, dec.nBits)
+ if uint8(dec.entry) != enc.nBits {
+ fmt.Fprintf(w, "symbol 0x%x bit size mismatch (enc: %d, dec:%d).\n", sym, enc.nBits, uint8(dec.entry))
errs++
}
- if dec.byte != uint8(sym) {
- fmt.Fprintf(w, "symbol 0x%x decoder output mismatch (enc: %d, dec:%d).\n", sym, sym, dec.byte)
+ if uint8(dec.entry>>8) != uint8(sym) {
+ fmt.Fprintf(w, "symbol 0x%x decoder output mismatch (enc: %d, dec:%d).\n", sym, sym, uint8(dec.entry>>8))
errs++
}
if errs > 0 {
@@ -392,12 +448,12 @@ func (s *Scratch) matches(ct cTable, w io.Writer) {
for i := uint16(0); i < (1 << ub); i++ {
vval := top | i
dec := dt[vval]
- if dec.nBits != enc.nBits {
- fmt.Fprintf(w, "symbol 0x%x bit size mismatch (enc: %d, dec:%d).\n", vval, enc.nBits, dec.nBits)
+ if uint8(dec.entry) != enc.nBits {
+ fmt.Fprintf(w, "symbol 0x%x bit size mismatch (enc: %d, dec:%d).\n", vval, enc.nBits, uint8(dec.entry))
errs++
}
- if dec.byte != uint8(sym) {
- fmt.Fprintf(w, "symbol 0x%x decoder output mismatch (enc: %d, dec:%d).\n", vval, sym, dec.byte)
+ if uint8(dec.entry>>8) != uint8(sym) {
+ fmt.Fprintf(w, "symbol 0x%x decoder output mismatch (enc: %d, dec:%d).\n", vval, sym, uint8(dec.entry>>8))
errs++
}
if errs > 20 {
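
The decode-table rewrite above packs the old two-field struct into one uint16: the bit count in the low byte and the decoded symbol in the high byte, so one load feeds both "br.bitsRead += uint8(v.entry)" and "uint8(v.entry >> 8)". A toy sketch of the packing:

package main

import "fmt"

func main() {
	// Pack a table entry the way ReadTable now does.
	pack := func(nBits, symbol uint8) uint16 {
		return uint16(nBits) | uint16(symbol)<<8
	}
	e := pack(5, 'A')
	fmt.Println(uint8(e))      // 5  (bits consumed)
	fmt.Println(uint8(e >> 8)) // 65 (decoded symbol 'A')
}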
diff --git a/vendor/github.com/klauspost/compress/huff0/huff0.go b/vendor/github.com/klauspost/compress/huff0/huff0.go
index 6f823f94d..6bc23bbf0 100644
--- a/vendor/github.com/klauspost/compress/huff0/huff0.go
+++ b/vendor/github.com/klauspost/compress/huff0/huff0.go
@@ -89,6 +89,12 @@ type Scratch struct {
// Reuse will specify the reuse policy
Reuse ReusePolicy
+ // WantLogLess allows specifying a log2 reduction that must at least be achieved,
+ // otherwise the block will be returned as incompressible.
+ // The reduction must then be at least (input size >> WantLogLess).
+ // If WantLogLess == 0, any improvement will do.
+ WantLogLess uint8
+
// MaxDecodedSize will set the maximum allowed output size.
// This value will automatically be set to BlockSizeMax if not set.
// Decoders will return ErrMaxDecodedSizeExceeded if this limit is exceeded.
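
A hedged sketch of how a caller might set the new field; the Compress1X signature is from this package, while the input and thresholds are illustrative. Compress1X returns huff0.ErrIncompressible when the target reduction is missed:

package main

import (
	"bytes"
	"fmt"

	"github.com/klauspost/compress/huff0"
)

func main() {
	// Require at least a 1/16th reduction, as zstd's block encoder now does.
	s := &huff0.Scratch{WantLogLess: 4}
	in := bytes.Repeat([]byte("abcd"), 256)
	out, _, err := huff0.Compress1X(in, s)
	if err != nil {
		fmt.Println("rejected:", err)
		return
	}
	fmt.Println(len(in), "->", len(out))
}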
diff --git a/vendor/github.com/klauspost/compress/zstd/README.md b/vendor/github.com/klauspost/compress/zstd/README.md
index d9d38b23f..52dc0aee3 100644
--- a/vendor/github.com/klauspost/compress/zstd/README.md
+++ b/vendor/github.com/klauspost/compress/zstd/README.md
@@ -26,8 +26,12 @@ Godoc Documentation: https://godoc.org/github.com/klauspost/compress/zstd
### Status:
-BETA - there may still be subtle bugs, but a wide variety of content has been tested.
-There may still be implementation specific stuff in regards to error handling that could lead to edge cases.
+STABLE - there may always be subtle bugs, but a wide variety of content has been tested and the library is actively
+used by several projects. This library is being continuously [fuzz-tested](https://github.com/klauspost/compress-fuzz),
+kindly supplied by [fuzzit.dev](https://fuzzit.dev/).
+
+There may still be specific combinations of data types/size/settings that could lead to edge cases,
+so as always, testing is recommended.
For now, a high speed (fastest) and medium-fast (default) compressor has been implemented.
@@ -251,8 +255,12 @@ The converter `s` can be reused to avoid allocations, even after errors.
## Decompressor
-STATUS: Release Candidate - there may still be subtle bugs, but a wide variety of content has been tested.
+Status: STABLE - there may still be subtle bugs, but a wide variety of content has been tested.
+This library is being continuously [fuzz-tested](https://github.com/klauspost/compress-fuzz),
+kindly supplied by [fuzzit.dev](https://fuzzit.dev/).
+The main purpose of the fuzz testing is to ensure that it is not possible to crash the decoder,
+or run it past its limits with ANY input provided.
### Usage
diff --git a/vendor/github.com/klauspost/compress/zstd/blockdec.go b/vendor/github.com/klauspost/compress/zstd/blockdec.go
index 3e161ea15..ed670bcc7 100644
--- a/vendor/github.com/klauspost/compress/zstd/blockdec.go
+++ b/vendor/github.com/klauspost/compress/zstd/blockdec.go
@@ -11,6 +11,7 @@ import (
"sync"
"github.com/klauspost/compress/huff0"
+ "github.com/klauspost/compress/zstd/internal/xxhash"
)
type blockType uint8
@@ -88,6 +89,7 @@ type blockDec struct {
sequenceBuf []seq
tmp [4]byte
err error
+ decWG sync.WaitGroup
}
func (b *blockDec) String() string {
@@ -104,6 +106,7 @@ func newBlockDec(lowMem bool) *blockDec {
input: make(chan struct{}, 1),
history: make(chan *history, 1),
}
+ b.decWG.Add(1)
go b.startDecoder()
return &b
}
@@ -160,7 +163,8 @@ func (b *blockDec) reset(br byteBuffer, windowSize uint64) error {
b.data, err = br.readBig(cSize, b.dataStorage)
if err != nil {
if debug {
- println("Reading block:", err)
+ println("Reading block:", err, "(", cSize, ")", len(b.data))
+ printf("%T", br)
}
return err
}
@@ -181,11 +185,13 @@ func (b *blockDec) Close() {
close(b.input)
close(b.history)
close(b.result)
+ b.decWG.Wait()
}
// decodeAsync will prepare decoding the block when it receives input.
// This will separate output and history.
func (b *blockDec) startDecoder() {
+ defer b.decWG.Done()
for range b.input {
//println("blockDec: Got block input")
switch b.Type {
@@ -275,7 +281,7 @@ func (b *blockDec) decodeBuf(hist *history) error {
hist.b = nil
err := b.decodeCompressed(hist)
if debug {
- println("Decompressed to total", len(b.dst), "bytes, error:", err)
+ println("Decompressed to total", len(b.dst), "bytes, hash:", xxhash.Sum64(b.dst), "error:", err)
}
hist.b = b.dst
b.dst = saved
@@ -368,7 +374,7 @@ func (b *blockDec) decodeCompressed(hist *history) error {
}
}
if debug {
- println("literals type:", litType, "litRegenSize:", litRegenSize, "litCompSize", litCompSize)
+ println("literals type:", litType, "litRegenSize:", litRegenSize, "litCompSize:", litCompSize, "sizeFormat:", sizeFormat, "4X:", fourStreams)
}
var literals []byte
var huff *huff0.Scratch
@@ -426,7 +432,6 @@ func (b *blockDec) decodeCompressed(hist *history) error {
}
literals = in[:litCompSize]
in = in[litCompSize:]
-
huff = huffDecoderPool.Get().(*huff0.Scratch)
var err error
// Ensure we have space to store it.
@@ -637,7 +642,7 @@ func (b *blockDec) decodeCompressed(hist *history) error {
hist.huffTree = huff
}
if debug {
- println("Final literals:", len(literals), "and", nSeqs, "sequences.")
+ println("Final literals:", len(literals), "hash:", xxhash.Sum64(literals), "and", nSeqs, "sequences.")
}
if nSeqs == 0 {
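
The decWG additions above fix a goroutine-lifecycle gap: Close() used to return while startDecoder() could still be draining its channels. A minimal sketch of the pattern, with hypothetical names:

package main

import (
	"fmt"
	"sync"
)

// worker mimics blockDec: a long-lived goroutine consuming an input
// channel, with a WaitGroup so Close can wait for it to exit.
type worker struct {
	input chan int
	wg    sync.WaitGroup
}

func newWorker() *worker {
	w := &worker{input: make(chan int, 1)}
	w.wg.Add(1) // mirrors b.decWG.Add(1) before go b.startDecoder()
	go w.run()
	return w
}

func (w *worker) run() {
	defer w.wg.Done() // mirrors the new defer b.decWG.Done()
	for v := range w.input {
		fmt.Println("processed block", v)
	}
}

func (w *worker) Close() {
	close(w.input)
	w.wg.Wait() // Close now returns only after the goroutine is gone
}

func main() {
	w := newWorker()
	w.input <- 1
	w.Close()
}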
diff --git a/vendor/github.com/klauspost/compress/zstd/blockenc.go b/vendor/github.com/klauspost/compress/zstd/blockenc.go
index 9d9151a0e..99eccda11 100644
--- a/vendor/github.com/klauspost/compress/zstd/blockenc.go
+++ b/vendor/github.com/klauspost/compress/zstd/blockenc.go
@@ -51,7 +51,7 @@ func (b *blockEnc) init() {
b.coders.llEnc = &fseEncoder{}
b.coders.llPrev = &fseEncoder{}
}
- b.litEnc = &huff0.Scratch{}
+ b.litEnc = &huff0.Scratch{WantLogLess: 4}
b.reset(nil)
}
@@ -300,13 +300,13 @@ func (b *blockEnc) encodeRaw(a []byte) {
}
// encodeLits can be used if the block is only litLen.
-func (b *blockEnc) encodeLits() error {
+func (b *blockEnc) encodeLits(raw bool) error {
var bh blockHeader
bh.setLast(b.last)
bh.setSize(uint32(len(b.literals)))
// Don't compress extremely small blocks
- if len(b.literals) < 32 {
+ if len(b.literals) < 32 || raw {
if debug {
println("Adding RAW block, length", len(b.literals))
}
@@ -391,10 +391,56 @@ func (b *blockEnc) encodeLits() error {
return nil
}
+// fuzzFseEncoder can be used to fuzz the FSE encoder.
+func fuzzFseEncoder(data []byte) int {
+ if len(data) > maxSequences || len(data) < 2 {
+ return 0
+ }
+ enc := fseEncoder{}
+ hist := enc.Histogram()[:256]
+ maxSym := uint8(0)
+ for i, v := range data {
+ v = v & 63
+ data[i] = v
+ hist[v]++
+ if v > maxSym {
+ maxSym = v
+ }
+ }
+ if maxSym == 0 {
+ // All 0
+ return 0
+ }
+ maxCount := func(a []uint32) int {
+ var max uint32
+ for _, v := range a {
+ if v > max {
+ max = v
+ }
+ }
+ return int(max)
+ }
+ cnt := maxCount(hist[:maxSym])
+ if cnt == len(data) {
+ // RLE
+ return 0
+ }
+ enc.HistogramFinished(maxSym, cnt)
+ err := enc.normalizeCount(len(data))
+ if err != nil {
+ return 0
+ }
+ _, err = enc.writeCount(nil)
+ if err != nil {
+ panic(err)
+ }
+ return 1
+}
+
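The return values follow the go-fuzz convention (1 for inputs worth mutating further, 0 for rejected ones). A hypothetical harness that would drive it — the file, build tag, and wiring are assumptions, not part of this change:

```go
// +build gofuzz

package zstd

// Fuzz is the entry point go-fuzz looks for; it simply forwards
// to the table-driven encoder fuzzer added in this change.
func Fuzz(data []byte) int {
	return fuzzFseEncoder(data)
}
```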
// encode will encode the block and put the output in b.output.
-func (b *blockEnc) encode() error {
+func (b *blockEnc) encode(raw bool) error {
if len(b.sequences) == 0 {
- return b.encodeLits()
+ return b.encodeLits(raw)
}
// We want some difference
if len(b.literals) > (b.size - (b.size >> 5)) {
@@ -412,19 +458,13 @@ func (b *blockEnc) encode() error {
reUsed, single bool
err error
)
- if len(b.literals) >= 1024 {
+ if len(b.literals) >= 1024 && !raw {
// Use 4 Streams.
out, reUsed, err = huff0.Compress4X(b.literals, b.litEnc)
- if len(out) > len(b.literals)-len(b.literals)>>4 {
- err = huff0.ErrIncompressible
- }
- } else if len(b.literals) > 32 {
+ } else if len(b.literals) > 32 && !raw {
// Use 1 stream
single = true
out, reUsed, err = huff0.Compress1X(b.literals, b.litEnc)
- if len(out) > len(b.literals)-len(b.literals)>>4 {
- err = huff0.ErrIncompressible
- }
} else {
err = huff0.ErrIncompressible
}
@@ -711,7 +751,7 @@ func (b *blockEnc) encode() error {
return nil
}
-var errIncompressible = errors.New("uncompressible")
+var errIncompressible = errors.New("incompressible")
func (b *blockEnc) genCodes() {
if len(b.sequences) == 0 {
diff --git a/vendor/github.com/klauspost/compress/zstd/bytebuf.go b/vendor/github.com/klauspost/compress/zstd/bytebuf.go
index 3538063f1..07321acb1 100644
--- a/vendor/github.com/klauspost/compress/zstd/bytebuf.go
+++ b/vendor/github.com/klauspost/compress/zstd/bytebuf.go
@@ -101,6 +101,9 @@ func (r *readerWrapper) readBig(n int, dst []byte) ([]byte, error) {
dst = make([]byte, n)
}
n2, err := io.ReadFull(r.r, dst[:n])
+ if err == io.EOF && n > 0 {
+ err = io.ErrUnexpectedEOF
+ }
return dst[:n2], err
}
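This guards a stdlib subtlety: io.ReadFull returns bare io.EOF only when zero bytes were read, yet a caller that asked for n > 0 bytes still has a truncated stream. A self-contained demonstration of the two cases:

```go
package main

import (
	"fmt"
	"io"
	"strings"
)

func main() {
	buf := make([]byte, 8)

	// Partial read: ReadFull already reports io.ErrUnexpectedEOF.
	_, err := io.ReadFull(strings.NewReader("abc"), buf)
	fmt.Println(err) // unexpected EOF

	// Empty source: ReadFull reports bare io.EOF even though the
	// caller wanted 8 bytes - the case readBig now normalizes.
	_, err = io.ReadFull(strings.NewReader(""), buf)
	fmt.Println(err) // EOF
}
```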
diff --git a/vendor/github.com/klauspost/compress/zstd/decoder.go b/vendor/github.com/klauspost/compress/zstd/decoder.go
index f4db3096a..1de94eef0 100644
--- a/vendor/github.com/klauspost/compress/zstd/decoder.go
+++ b/vendor/github.com/klauspost/compress/zstd/decoder.go
@@ -75,6 +75,7 @@ var (
// The Reset function can be used to initiate a new stream, which will considerably
// reduce the allocations normally caused by NewReader.
func NewReader(r io.Reader, opts ...DOption) (*Decoder, error) {
+ initPredefined()
var d Decoder
d.o.setDefault()
for _, o := range opts {
@@ -123,7 +124,9 @@ func (d *Decoder) Read(p []byte) (int, error) {
if d.current.err != nil {
break
}
- d.nextBlock()
+ if !d.nextBlock(n == 0) {
+ return n, nil
+ }
}
}
if len(d.current.b) > 0 {
@@ -251,7 +254,7 @@ func (d *Decoder) WriteTo(w io.Writer) (int64, error) {
if d.current.err != nil {
break
}
- d.nextBlock()
+ d.nextBlock(true)
}
err := d.current.err
if err != nil {
@@ -328,7 +331,10 @@ func (d *Decoder) DecodeAll(input, dst []byte) ([]byte, error) {
// nextBlock returns the next block.
// If an error occurs d.err will be set.
-func (d *Decoder) nextBlock() {
+// Optionally the function can block for new output.
+// If non-blocking mode is used the returned boolean will be false
+// if no data was available without blocking.
+func (d *Decoder) nextBlock(blocking bool) (ok bool) {
if d.current.d != nil {
if debug {
printf("re-adding current decoder %p", d.current.d)
@@ -338,12 +344,22 @@ func (d *Decoder) nextBlock() {
}
if d.current.err != nil {
// Keep error state.
- return
+ return blocking
+ }
+
+ if blocking {
+ d.current.decodeOutput = <-d.current.output
+ } else {
+ select {
+ case d.current.decodeOutput = <-d.current.output:
+ default:
+ return false
+ }
}
- d.current.decodeOutput = <-d.current.output
if debug {
println("got", len(d.current.b), "bytes, error:", d.current.err)
}
+ return true
}
// Close will release all resources.
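The select/default added here is what lets Read return partially decoded data instead of stalling on the output channel (it only blocks while n == 0). The pattern in isolation, with an illustrative channel type:

```go
package main

import "fmt"

// tryRecv returns the next value if one is ready, without blocking
// unless asked to. This mirrors the select/default in nextBlock.
func tryRecv(ch chan int, blocking bool) (int, bool) {
	if blocking {
		v, ok := <-ch
		return v, ok
	}
	select {
	case v, ok := <-ch:
		return v, ok
	default:
		return 0, false // nothing available right now
	}
}

func main() {
	ch := make(chan int, 1)
	if _, ok := tryRecv(ch, false); !ok {
		fmt.Println("no block ready, returning partial read")
	}
	ch <- 42
	v, _ := tryRecv(ch, true)
	fmt.Println("got", v)
}
```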
diff --git a/vendor/github.com/klauspost/compress/zstd/enc_dfast.go b/vendor/github.com/klauspost/compress/zstd/enc_dfast.go
index e120625d8..2f41bcd0d 100644
--- a/vendor/github.com/klauspost/compress/zstd/enc_dfast.go
+++ b/vendor/github.com/klauspost/compress/zstd/enc_dfast.go
@@ -235,7 +235,7 @@ encodeLoop:
if debug && s-t > e.maxMatchOff {
panic("s - t >e.maxMatchOff")
}
- if debug {
+ if debugMatches {
println("long match")
}
break
@@ -259,7 +259,7 @@ encodeLoop:
// but the likelihood of both the first 4 bytes and the hash matching should be enough.
t = candidateL.offset - e.cur
s += checkAt
- if debug {
+ if debugMatches {
println("long match (after short)")
}
break
@@ -275,7 +275,7 @@ encodeLoop:
if debug && t < 0 {
panic("t<0")
}
- if debug {
+ if debugMatches {
println("short match")
}
break
diff --git a/vendor/github.com/klauspost/compress/zstd/encoder.go b/vendor/github.com/klauspost/compress/zstd/encoder.go
index b7011be29..f413042f4 100644
--- a/vendor/github.com/klauspost/compress/zstd/encoder.go
+++ b/vendor/github.com/klauspost/compress/zstd/encoder.go
@@ -59,6 +59,7 @@ type encoderState struct {
// NewWriter will create a new Zstandard encoder.
// If the encoder will be used for encoding blocks a nil writer can be used.
func NewWriter(w io.Writer, opts ...EOption) (*Encoder, error) {
+ initPredefined()
var e Encoder
e.o.setDefault()
for _, o := range opts {
@@ -261,7 +262,7 @@ func (e *Encoder) nextBlock(final bool) error {
// If we got the exact same number of literals as input,
// assume the literals cannot be compressed.
if len(src) != len(blk.literals) || len(src) != e.o.blockSize {
- err = blk.encode()
+ err = blk.encode(e.o.noEntropy)
}
switch err {
case errIncompressible:
@@ -393,12 +394,31 @@ func (e *Encoder) Close() error {
// EncodeAll will encode all input in src and append it to dst.
// This function can be called concurrently, but each call will only run on a single goroutine.
-// If empty input is given, nothing is returned.
+// If empty input is given, nothing is returned, unless WithZeroFrames is specified.
// Encoded blocks can be concatenated and the result will be the combined input stream.
// Data compressed with EncodeAll can be decoded with the Decoder,
// using either a stream or DecodeAll.
func (e *Encoder) EncodeAll(src, dst []byte) []byte {
if len(src) == 0 {
+ if e.o.fullZero {
+ // Add frame header.
+ fh := frameHeader{
+ ContentSize: 0,
+ WindowSize: MinWindowSize,
+ SingleSegment: true,
+ // Adding a checksum would be a waste of space.
+ Checksum: false,
+ DictID: 0,
+ }
+ dst, _ = fh.appendTo(dst)
+
+ // Write raw block as last one only.
+ var blk blockHeader
+ blk.setSize(0)
+ blk.setType(blockTypeRaw)
+ blk.setLast(true)
+ dst = blk.appendTo(dst)
+ }
return dst
}
e.init.Do(func() {
@@ -453,7 +473,7 @@ func (e *Encoder) EncodeAll(src, dst []byte) []byte {
// If we got the exact same number of literals as input,
// assume the literals cannot be compressed.
if len(blk.literals) != len(todo) || len(todo) != e.o.blockSize {
- err = blk.encode()
+ err = blk.encode(e.o.noEntropy)
}
switch err {
diff --git a/vendor/github.com/klauspost/compress/zstd/encoder_options.go b/vendor/github.com/klauspost/compress/zstd/encoder_options.go
index a8559e900..40eb45733 100644
--- a/vendor/github.com/klauspost/compress/zstd/encoder_options.go
+++ b/vendor/github.com/klauspost/compress/zstd/encoder_options.go
@@ -1,6 +1,7 @@
package zstd
import (
+ "errors"
"fmt"
"runtime"
"strings"
@@ -18,6 +19,8 @@ type encoderOptions struct {
blockSize int
windowSize int
level EncoderLevel
+ fullZero bool
+ noEntropy bool
}
func (o *encoderOptions) setDefault() {
@@ -63,6 +66,30 @@ func WithEncoderConcurrency(n int) EOption {
}
}
+// WithWindowSize will set the maximum allowed back-reference distance.
+// The value must be a power of two between MinWindowSize and MaxWindowSize.
+// A larger value will enable better compression but allocate more memory and,
+// for above-default values, take considerably longer.
+// The default value is determined by the compression level.
+func WithWindowSize(n int) EOption {
+ return func(o *encoderOptions) error {
+ switch {
+ case n < MinWindowSize:
+ return fmt.Errorf("window size must be at least %d", MinWindowSize)
+ case n > MaxWindowSize:
+ return fmt.Errorf("window size must be at most %d", MaxWindowSize)
+ case (n & (n - 1)) != 0:
+ return errors.New("window size must be a power of 2")
+ }
+
+ o.windowSize = n
+ if o.blockSize > o.windowSize {
+ o.blockSize = o.windowSize
+ }
+ return nil
+ }
+}
+
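A usage sketch for the new option; an invalid size is rejected while NewWriter applies its options:

```go
package main

import (
	"fmt"
	"io/ioutil"

	"github.com/klauspost/compress/zstd"
)

func main() {
	// 1 MiB window: a power of two between MinWindowSize and MaxWindowSize.
	enc, err := zstd.NewWriter(ioutil.Discard, zstd.WithWindowSize(1<<20))
	if err != nil {
		panic(err)
	}
	defer enc.Close()

	// A non-power-of-two is rejected up front.
	_, err = zstd.NewWriter(ioutil.Discard, zstd.WithWindowSize(3<<10))
	fmt.Println(err) // window size must be a power of 2
}
```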
// WithEncoderPadding will add padding to all output so the size will be a multiple of n.
// This can be used to obfuscate the exact output size or make blocks of a certain size.
// The contents will be a skippable frame, so it will be invisible by the decoder.
@@ -166,6 +193,26 @@ func WithEncoderLevel(l EncoderLevel) EOption {
}
}
+// WithZeroFrames will encode zero-length input as full frames.
+// This can be needed for compatibility with other zstandard implementations,
+// but is not needed by this package.
+func WithZeroFrames(b bool) EOption {
+ return func(o *encoderOptions) error {
+ o.fullZero = b
+ return nil
+ }
+}
+
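Together with the EncodeAll change above, this makes empty input yield a complete, decodable empty frame instead of zero bytes. A sketch (a nil writer is fine when only EncodeAll is used):

```go
package main

import (
	"fmt"

	"github.com/klauspost/compress/zstd"
)

func main() {
	plain, _ := zstd.NewWriter(nil)
	strict, _ := zstd.NewWriter(nil, zstd.WithZeroFrames(true))

	fmt.Println(len(plain.EncodeAll(nil, nil)))  // 0: nothing emitted
	fmt.Println(len(strict.EncodeAll(nil, nil))) // >0: header + empty raw block
}
```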
+// WithNoEntropyCompression will always skip entropy compression of literals.
+// This can be useful if content has matches but is unlikely to benefit from
+// entropy compression. The slight encoding speedup is usually not worth it.
+func WithNoEntropyCompression(b bool) EOption {
+ return func(o *encoderOptions) error {
+ o.noEntropy = b
+ return nil
+ }
+}
+
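And a sketch of the companion knob: matches are still found and encoded, only the huff0 literal stage is bypassed:

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/klauspost/compress/zstd"
)

func main() {
	var buf bytes.Buffer
	// Literals are stored raw; match/offset coding still applies.
	enc, err := zstd.NewWriter(&buf, zstd.WithNoEntropyCompression(true))
	if err != nil {
		panic(err)
	}
	enc.Write([]byte("some repetitive data, some repetitive data"))
	enc.Close()
	fmt.Println("compressed size:", buf.Len())
}
```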
// WithSingleSegment will set the "single segment" flag when EncodeAll is used.
// If this flag is set, data must be regenerated within a single continuous memory segment.
// In this case, Window_Descriptor byte is skipped, but Frame_Content_Size is necessarily present.
diff --git a/vendor/github.com/klauspost/compress/zstd/framedec.go b/vendor/github.com/klauspost/compress/zstd/framedec.go
index 839a95fbf..40790747a 100644
--- a/vendor/github.com/klauspost/compress/zstd/framedec.go
+++ b/vendor/github.com/klauspost/compress/zstd/framedec.go
@@ -49,7 +49,8 @@ type frameDec struct {
const (
// The minimum Window_Size is 1 KB.
- minWindowSize = 1 << 10
+ MinWindowSize = 1 << 10
+ MaxWindowSize = 1 << 30
)
var (
@@ -60,7 +61,7 @@ var (
func newFrameDec(o decoderOptions) *frameDec {
d := frameDec{
o: o,
- maxWindowSize: 1 << 30,
+ maxWindowSize: MaxWindowSize,
}
if d.maxWindowSize > o.maxDecodedSize {
d.maxWindowSize = o.maxDecodedSize
@@ -193,14 +194,14 @@ func (d *frameDec) reset(br byteBuffer) error {
// When FCS_Field_Size is 2, the offset of 256 is added.
d.FrameContentSize = uint64(b[0]) | (uint64(b[1]) << 8) + 256
case 4:
- d.FrameContentSize = uint64(b[0]) | (uint64(b[1]) << 8) | (uint64(b[2]) << 16) | (uint64(b[3] << 24))
+ d.FrameContentSize = uint64(b[0]) | (uint64(b[1]) << 8) | (uint64(b[2]) << 16) | (uint64(b[3]) << 24)
case 8:
d1 := uint32(b[0]) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24)
d2 := uint32(b[4]) | (uint32(b[5]) << 8) | (uint32(b[6]) << 16) | (uint32(b[7]) << 24)
d.FrameContentSize = uint64(d1) | (uint64(d2) << 32)
}
if debug {
- println("field size bits:", v, "fcsSize:", fcsSize, "FrameContentSize:", d.FrameContentSize, hex.EncodeToString(b[:fcsSize]))
+ println("field size bits:", v, "fcsSize:", fcsSize, "FrameContentSize:", d.FrameContentSize, hex.EncodeToString(b[:fcsSize]), "singleseg:", d.SingleSegment, "window:", d.WindowSize)
}
}
// Move this to shared.
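The 4-byte case above fixes a classic Go pitfall: in `uint64(b[3] << 24)` the shift is evaluated on the uint8 before the conversion, so it is always zero and the top byte of the content size was silently dropped. A two-line demonstration:

```go
package main

import "fmt"

func main() {
	b := byte(0x12)
	fmt.Println(uint64(b << 24)) // 0: the shift overflows the byte first
	fmt.Println(uint64(b) << 24) // 301989888: convert, then shift
}
```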
@@ -215,8 +216,8 @@ func (d *frameDec) reset(br byteBuffer) error {
if d.WindowSize == 0 && d.SingleSegment {
// We may not need window in this case.
d.WindowSize = d.FrameContentSize
- if d.WindowSize < minWindowSize {
- d.WindowSize = minWindowSize
+ if d.WindowSize < MinWindowSize {
+ d.WindowSize = MinWindowSize
}
}
@@ -225,7 +226,7 @@ func (d *frameDec) reset(br byteBuffer) error {
return ErrWindowSizeExceeded
}
// The minimum Window_Size is 1 KB.
- if d.WindowSize < minWindowSize {
+ if d.WindowSize < MinWindowSize {
println("got window size: ", d.WindowSize)
return ErrWindowSizeTooSmall
}
@@ -309,7 +310,9 @@ func (d *frameDec) checkCRC() error {
}
return ErrCRCMismatch
}
- println("CRC ok")
+ if debug {
+ println("CRC ok", tmp[:])
+ }
return nil
}
@@ -411,6 +414,7 @@ func (d *frameDec) startDecoder(output chan decodeOutput) {
}
written += int64(len(r.b))
if d.SingleSegment && uint64(written) > d.FrameContentSize {
+ println("runDecoder: single segment and", uint64(written), ">", d.FrameContentSize)
r.err = ErrFrameSizeExceeded
output <- r
return
@@ -461,6 +465,7 @@ func (d *frameDec) runDecoder(dst []byte, dec *blockDec) ([]byte, error) {
break
}
if d.SingleSegment && uint64(len(d.history.b)) > d.o.maxDecodedSize {
+ println("runDecoder: single segment and", uint64(len(d.history.b)), ">", d.o.maxDecodedSize)
err = ErrFrameSizeExceeded
break
}
@@ -473,9 +478,10 @@ func (d *frameDec) runDecoder(dst []byte, dec *blockDec) ([]byte, error) {
if err == nil {
if n != len(dst)-crcStart {
err = io.ErrShortWrite
+ } else {
+ err = d.checkCRC()
}
}
- err = d.checkCRC()
}
}
d.history.b = saved
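Moving checkCRC into the else branch keeps the more specific io.ErrShortWrite from being overwritten by a CRC computed over a short copy. The corrected control flow, reduced to a sketch with illustrative inputs:

```go
package main

import (
	"errors"
	"fmt"
	"io"
)

var errCRC = errors.New("crc mismatch")

// finish mirrors the corrected ordering: the checksum is consulted
// only when the full payload was written, so a short write surfaces
// as such instead of being masked by a bogus CRC error.
func finish(n, want int, crcOK bool) error {
	if n != want {
		return io.ErrShortWrite
	}
	if !crcOK {
		return errCRC
	}
	return nil
}

func main() {
	fmt.Println(finish(3, 10, false)) // short write, not crc mismatch
	fmt.Println(finish(10, 10, false))
	fmt.Println(finish(10, 10, true))
}
```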
diff --git a/vendor/github.com/klauspost/compress/zstd/frameenc.go b/vendor/github.com/klauspost/compress/zstd/frameenc.go
index acac32527..4479cfe18 100644
--- a/vendor/github.com/klauspost/compress/zstd/frameenc.go
+++ b/vendor/github.com/klauspost/compress/zstd/frameenc.go
@@ -5,7 +5,6 @@
package zstd
import (
- "errors"
"fmt"
"io"
"math"
@@ -49,9 +48,7 @@ func (f frameHeader) appendTo(dst []byte) ([]byte, error) {
windowLog := (bits.Len32(f.WindowSize-1) - winLogMin) << 3
dst = append(dst, uint8(windowLog))
}
- if f.SingleSegment && f.ContentSize == 0 {
- return nil, errors.New("single segment, but no size set")
- }
+
switch fcs {
case 0:
if f.SingleSegment {
diff --git a/vendor/github.com/klauspost/compress/zstd/fse_encoder.go b/vendor/github.com/klauspost/compress/zstd/fse_encoder.go
index dfa6cf7ce..619836f52 100644
--- a/vendor/github.com/klauspost/compress/zstd/fse_encoder.go
+++ b/vendor/github.com/klauspost/compress/zstd/fse_encoder.go
@@ -502,13 +502,22 @@ func (s *fseEncoder) validateNorm() (err error) {
// writeCount will write the normalized histogram count to header.
// This is read back by readNCount.
func (s *fseEncoder) writeCount(out []byte) ([]byte, error) {
+ if s.useRLE {
+ return append(out, s.rleVal), nil
+ }
+ if s.preDefined || s.reUsed {
+ // Never write predefined.
+ return out, nil
+ }
+
var (
tableLog = s.actualTableLog
tableSize = 1 << tableLog
previous0 bool
charnum uint16
- maxHeaderSize = ((int(s.symbolLen) * int(tableLog)) >> 3) + 3
+ // maximum header size plus 2 extra bytes for final output if bitCount == 0.
+ maxHeaderSize = ((int(s.symbolLen) * int(tableLog)) >> 3) + 3 + 2
// Write Table Size
bitStream = uint32(tableLog - minEncTablelog)
@@ -516,15 +525,12 @@ func (s *fseEncoder) writeCount(out []byte) ([]byte, error) {
remaining = int16(tableSize + 1) /* +1 for extra accuracy */
threshold = int16(tableSize)
nbBits = uint(tableLog + 1)
+ outP = len(out)
)
- if s.useRLE {
- return append(out, s.rleVal), nil
+ if cap(out) < outP+maxHeaderSize {
+ out = append(out, make([]byte, maxHeaderSize*3)...)
+ out = out[:len(out)-maxHeaderSize*3]
}
- if s.preDefined || s.reUsed {
- // Never write predefined.
- return out, nil
- }
- outP := len(out)
out = out[:outP+maxHeaderSize]
// stops at 1
@@ -594,11 +600,14 @@ func (s *fseEncoder) writeCount(out []byte) ([]byte, error) {
}
}
+ if outP+2 > len(out) {
+ return nil, fmt.Errorf("internal error: %d > %d, maxheader: %d, sl: %d, tl: %d, normcount: %v", outP+2, len(out), maxHeaderSize, s.symbolLen, int(tableLog), s.norm[:s.symbolLen])
+ }
out[outP] = byte(bitStream)
out[outP+1] = byte(bitStream >> 8)
outP += int((bitCount + 7) / 8)
- if uint16(charnum) > s.symbolLen {
+ if charnum > s.symbolLen {
return nil, errors.New("internal error: charnum > s.symbolLen")
}
return out[:outP], nil
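The capacity check replaces a bare reslice that would panic when cap(out) was too small; growing through append and then restoring the length reserves headroom while keeping any over-allocation for later calls. The idiom in isolation:

```go
package main

import "fmt"

// ensureCap grows b's capacity to at least len(b)+n without changing
// its length - the same trick writeCount now uses before reslicing.
func ensureCap(b []byte, n int) []byte {
	if cap(b) >= len(b)+n {
		return b
	}
	b = append(b, make([]byte, n)...) // grow (append may over-allocate)
	return b[:len(b)-n]               // restore the logical length
}

func main() {
	b := make([]byte, 2, 2)
	b = ensureCap(b, 16)
	fmt.Println(len(b), cap(b) >= 18) // 2 true
}
```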
diff --git a/vendor/github.com/klauspost/compress/zstd/fse_predefined.go b/vendor/github.com/klauspost/compress/zstd/fse_predefined.go
index 5186de802..6c17dc17f 100644
--- a/vendor/github.com/klauspost/compress/zstd/fse_predefined.go
+++ b/vendor/github.com/klauspost/compress/zstd/fse_predefined.go
@@ -7,6 +7,7 @@ package zstd
import (
"fmt"
"math"
+ "sync"
)
var (
@@ -69,85 +70,89 @@ func fillBase(dst []baseOffset, base uint32, bits ...uint8) {
}
}
-func init() {
- // Literals length codes
- tmp := make([]baseOffset, 36)
- for i := range tmp[:16] {
- tmp[i] = baseOffset{
- baseLine: uint32(i),
- addBits: 0,
+var predef sync.Once
+
+func initPredefined() {
+ predef.Do(func() {
+ // Literals length codes
+ tmp := make([]baseOffset, 36)
+ for i := range tmp[:16] {
+ tmp[i] = baseOffset{
+ baseLine: uint32(i),
+ addBits: 0,
+ }
}
- }
- fillBase(tmp[16:], 16, 1, 1, 1, 1, 2, 2, 3, 3, 4, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16)
- symbolTableX[tableLiteralLengths] = tmp
-
- // Match length codes
- tmp = make([]baseOffset, 53)
- for i := range tmp[:32] {
- tmp[i] = baseOffset{
- // The transformation adds the 3 length.
- baseLine: uint32(i) + 3,
- addBits: 0,
- }
- }
- fillBase(tmp[32:], 35, 1, 1, 1, 1, 2, 2, 3, 3, 4, 4, 5, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16)
- symbolTableX[tableMatchLengths] = tmp
-
- // Offset codes
- tmp = make([]baseOffset, maxOffsetBits+1)
- tmp[1] = baseOffset{
- baseLine: 1,
- addBits: 1,
- }
- fillBase(tmp[2:], 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30)
- symbolTableX[tableOffsets] = tmp
-
- // Fill predefined tables and transform them.
- // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#default-distributions
- for i := range fsePredef[:] {
- f := &fsePredef[i]
- switch tableIndex(i) {
- case tableLiteralLengths:
- // https://github.com/facebook/zstd/blob/ededcfca57366461021c922720878c81a5854a0a/lib/decompress/zstd_decompress_block.c#L243
- f.actualTableLog = 6
- copy(f.norm[:], []int16{4, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1,
- 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 2, 1, 1, 1, 1, 1,
- -1, -1, -1, -1})
- f.symbolLen = 36
- case tableOffsets:
- // https://github.com/facebook/zstd/blob/ededcfca57366461021c922720878c81a5854a0a/lib/decompress/zstd_decompress_block.c#L281
- f.actualTableLog = 5
- copy(f.norm[:], []int16{
- 1, 1, 1, 1, 1, 1, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 1, 1, -1, -1, -1, -1, -1})
- f.symbolLen = 29
- case tableMatchLengths:
- //https://github.com/facebook/zstd/blob/ededcfca57366461021c922720878c81a5854a0a/lib/decompress/zstd_decompress_block.c#L304
- f.actualTableLog = 6
- copy(f.norm[:], []int16{
- 1, 4, 3, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, -1, -1,
- -1, -1, -1, -1, -1})
- f.symbolLen = 53
+ fillBase(tmp[16:], 16, 1, 1, 1, 1, 2, 2, 3, 3, 4, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16)
+ symbolTableX[tableLiteralLengths] = tmp
+
+ // Match length codes
+ tmp = make([]baseOffset, 53)
+ for i := range tmp[:32] {
+ tmp[i] = baseOffset{
+ // The transformation adds the 3 length.
+ baseLine: uint32(i) + 3,
+ addBits: 0,
+ }
}
- if err := f.buildDtable(); err != nil {
- panic(fmt.Errorf("building table %v: %v", tableIndex(i), err))
+ fillBase(tmp[32:], 35, 1, 1, 1, 1, 2, 2, 3, 3, 4, 4, 5, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16)
+ symbolTableX[tableMatchLengths] = tmp
+
+ // Offset codes
+ tmp = make([]baseOffset, maxOffsetBits+1)
+ tmp[1] = baseOffset{
+ baseLine: 1,
+ addBits: 1,
}
- if err := f.transform(symbolTableX[i]); err != nil {
- panic(fmt.Errorf("building table %v: %v", tableIndex(i), err))
+ fillBase(tmp[2:], 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30)
+ symbolTableX[tableOffsets] = tmp
+
+ // Fill predefined tables and transform them.
+ // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#default-distributions
+ for i := range fsePredef[:] {
+ f := &fsePredef[i]
+ switch tableIndex(i) {
+ case tableLiteralLengths:
+ // https://github.com/facebook/zstd/blob/ededcfca57366461021c922720878c81a5854a0a/lib/decompress/zstd_decompress_block.c#L243
+ f.actualTableLog = 6
+ copy(f.norm[:], []int16{4, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 2, 1, 1, 1, 1, 1,
+ -1, -1, -1, -1})
+ f.symbolLen = 36
+ case tableOffsets:
+ // https://github.com/facebook/zstd/blob/ededcfca57366461021c922720878c81a5854a0a/lib/decompress/zstd_decompress_block.c#L281
+ f.actualTableLog = 5
+ copy(f.norm[:], []int16{
+ 1, 1, 1, 1, 1, 1, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, -1, -1, -1, -1, -1})
+ f.symbolLen = 29
+ case tableMatchLengths:
+ //https://github.com/facebook/zstd/blob/ededcfca57366461021c922720878c81a5854a0a/lib/decompress/zstd_decompress_block.c#L304
+ f.actualTableLog = 6
+ copy(f.norm[:], []int16{
+ 1, 4, 3, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, -1, -1,
+ -1, -1, -1, -1, -1})
+ f.symbolLen = 53
+ }
+ if err := f.buildDtable(); err != nil {
+ panic(fmt.Errorf("building table %v: %v", tableIndex(i), err))
+ }
+ if err := f.transform(symbolTableX[i]); err != nil {
+ panic(fmt.Errorf("building table %v: %v", tableIndex(i), err))
+ }
+ f.preDefined = true
+
+ // Create encoder as well
+ enc := &fsePredefEnc[i]
+ copy(enc.norm[:], f.norm[:])
+ enc.symbolLen = f.symbolLen
+ enc.actualTableLog = f.actualTableLog
+ if err := enc.buildCTable(); err != nil {
+ panic(fmt.Errorf("building encoding table %v: %v", tableIndex(i), err))
+ }
+ enc.setBits(bitTables[i])
+ enc.preDefined = true
}
- f.preDefined = true
-
- // Create encoder as well
- enc := &fsePredefEnc[i]
- copy(enc.norm[:], f.norm[:])
- enc.symbolLen = f.symbolLen
- enc.actualTableLog = f.actualTableLog
- if err := enc.buildCTable(); err != nil {
- panic(fmt.Errorf("building encoding table %v: %v", tableIndex(i), err))
- }
- enc.setBits(bitTables[i])
- enc.preDefined = true
- }
+ })
}
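Wrapping the former init body in sync.Once means importers no longer pay for table construction at program startup; the first NewReader, NewWriter, or Convert call triggers it exactly once, race-free. The pattern in miniature:

```go
package main

import (
	"fmt"
	"sync"
)

var (
	tablesOnce sync.Once
	tables     []int
)

// initTables is cheap to call from every entry point; the body runs
// only on the first call, even under concurrency.
func initTables() {
	tablesOnce.Do(func() {
		fmt.Println("building tables once")
		tables = make([]int, 256)
	})
}

func main() {
	for i := 0; i < 3; i++ {
		initTables() // prints only on the first iteration
	}
	fmt.Println(len(tables))
}
```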
diff --git a/vendor/github.com/klauspost/compress/zstd/snappy.go b/vendor/github.com/klauspost/compress/zstd/snappy.go
index e9e518570..356956ba2 100644
--- a/vendor/github.com/klauspost/compress/zstd/snappy.go
+++ b/vendor/github.com/klauspost/compress/zstd/snappy.go
@@ -80,6 +80,7 @@ type SnappyConverter struct {
// If any error is detected on the Snappy stream it is returned.
// The number of bytes written is returned.
func (r *SnappyConverter) Convert(in io.Reader, w io.Writer) (int64, error) {
+ initPredefined()
r.err = nil
r.r = in
if r.block == nil {
@@ -110,7 +111,7 @@ func (r *SnappyConverter) Convert(in io.Reader, w io.Writer) (int64, error) {
// Add empty last block
r.block.reset(nil)
r.block.last = true
- err := r.block.encodeLits()
+ err := r.block.encodeLits(false)
if err != nil {
return written, err
}
@@ -177,7 +178,7 @@ func (r *SnappyConverter) Convert(in io.Reader, w io.Writer) (int64, error) {
r.err = ErrSnappyCorrupt
return written, r.err
}
- err = r.block.encode()
+ err = r.block.encode(false)
switch err {
case errIncompressible:
r.block.popOffsets()
@@ -187,7 +188,7 @@ func (r *SnappyConverter) Convert(in io.Reader, w io.Writer) (int64, error) {
println("snappy.Decode:", err)
return written, err
}
- err = r.block.encodeLits()
+ err = r.block.encodeLits(false)
if err != nil {
return written, err
}
@@ -234,7 +235,7 @@ func (r *SnappyConverter) Convert(in io.Reader, w io.Writer) (int64, error) {
r.err = ErrSnappyCorrupt
return written, r.err
}
- err := r.block.encodeLits()
+ err := r.block.encodeLits(false)
if err != nil {
return written, err
}
diff --git a/vendor/github.com/klauspost/compress/zstd/zstd.go b/vendor/github.com/klauspost/compress/zstd/zstd.go
index b975954c1..57a8a2f5b 100644
--- a/vendor/github.com/klauspost/compress/zstd/zstd.go
+++ b/vendor/github.com/klauspost/compress/zstd/zstd.go
@@ -11,6 +11,7 @@ import (
const debug = false
const debugSequences = false
+const debugMatches = false
// force encoder to use predefined tables.
const forcePreDef = false
diff --git a/vendor/github.com/klauspost/cpuid/.gitignore b/vendor/github.com/klauspost/cpuid/.gitignore
deleted file mode 100644
index daf913b1b..000000000
--- a/vendor/github.com/klauspost/cpuid/.gitignore
+++ /dev/null
@@ -1,24 +0,0 @@
-# Compiled Object files, Static and Dynamic libs (Shared Objects)
-*.o
-*.a
-*.so
-
-# Folders
-_obj
-_test
-
-# Architecture specific extensions/prefixes
-*.[568vq]
-[568vq].out
-
-*.cgo1.go
-*.cgo2.c
-_cgo_defun.c
-_cgo_gotypes.go
-_cgo_export.*
-
-_testmain.go
-
-*.exe
-*.test
-*.prof
diff --git a/vendor/github.com/klauspost/cpuid/.travis.yml b/vendor/github.com/klauspost/cpuid/.travis.yml
deleted file mode 100644
index 630192d59..000000000
--- a/vendor/github.com/klauspost/cpuid/.travis.yml
+++ /dev/null
@@ -1,23 +0,0 @@
-language: go
-
-sudo: false
-
-os:
- - linux
- - osx
-go:
- - 1.8.x
- - 1.9.x
- - 1.10.x
- - master
-
-script:
- - go vet ./...
- - go test -v ./...
- - go test -race ./...
- - diff <(gofmt -d .) <("")
-
-matrix:
- allow_failures:
- - go: 'master'
- fast_finish: true
diff --git a/vendor/github.com/klauspost/cpuid/CONTRIBUTING.txt b/vendor/github.com/klauspost/cpuid/CONTRIBUTING.txt
deleted file mode 100644
index 2ef4714f7..000000000
--- a/vendor/github.com/klauspost/cpuid/CONTRIBUTING.txt
+++ /dev/null
@@ -1,35 +0,0 @@
-Developer Certificate of Origin
-Version 1.1
-
-Copyright (C) 2015- Klaus Post & Contributors.
-Email: klauspost@gmail.com
-
-Everyone is permitted to copy and distribute verbatim copies of this
-license document, but changing it is not allowed.
-
-
-Developer's Certificate of Origin 1.1
-
-By making a contribution to this project, I certify that:
-
-(a) The contribution was created in whole or in part by me and I
- have the right to submit it under the open source license
- indicated in the file; or
-
-(b) The contribution is based upon previous work that, to the best
- of my knowledge, is covered under an appropriate open source
- license and I have the right under that license to submit that
- work with modifications, whether created in whole or in part
- by me, under the same open source license (unless I am
- permitted to submit under a different license), as indicated
- in the file; or
-
-(c) The contribution was provided directly to me by some other
- person who certified (a), (b) or (c) and I have not modified
- it.
-
-(d) I understand and agree that this project and the contribution
- are public and that a record of the contribution (including all
- personal information I submit with it, including my sign-off) is
- maintained indefinitely and may be redistributed consistent with
- this project or the open source license(s) involved.
diff --git a/vendor/github.com/klauspost/cpuid/LICENSE b/vendor/github.com/klauspost/cpuid/LICENSE
deleted file mode 100644
index 5cec7ee94..000000000
--- a/vendor/github.com/klauspost/cpuid/LICENSE
+++ /dev/null
@@ -1,22 +0,0 @@
-The MIT License (MIT)
-
-Copyright (c) 2015 Klaus Post
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-
diff --git a/vendor/github.com/klauspost/cpuid/README.md b/vendor/github.com/klauspost/cpuid/README.md
deleted file mode 100644
index a7fb41fbe..000000000
--- a/vendor/github.com/klauspost/cpuid/README.md
+++ /dev/null
@@ -1,147 +0,0 @@
-# cpuid
-Package cpuid provides information about the CPU running the current program.
-
-CPU features are detected on startup, and kept for fast access through the life of the application.
-Currently x86 / x64 (AMD64) is supported, and no external C (cgo) code is used, which should make the library very easy to use.
-
-You can access the CPU information by accessing the shared CPU variable of the cpuid library.
-
-Package home: https://github.com/klauspost/cpuid
-
-[![GoDoc][1]][2] [![Build Status][3]][4]
-
-[1]: https://godoc.org/github.com/klauspost/cpuid?status.svg
-[2]: https://godoc.org/github.com/klauspost/cpuid
-[3]: https://travis-ci.org/klauspost/cpuid.svg
-[4]: https://travis-ci.org/klauspost/cpuid
-
-# features
-## CPU Instructions
-* **CMOV** (i686 CMOV)
-* **NX** (NX (No-Execute) bit)
-* **AMD3DNOW** (AMD 3DNOW)
-* **AMD3DNOWEXT** (AMD 3DNowExt)
-* **MMX** (standard MMX)
-* **MMXEXT** (SSE integer functions or AMD MMX ext)
-* **SSE** (SSE functions)
-* **SSE2** (P4 SSE functions)
-* **SSE3** (Prescott SSE3 functions)
-* **SSSE3** (Conroe SSSE3 functions)
-* **SSE4** (Penryn SSE4.1 functions)
-* **SSE4A** (AMD Barcelona microarchitecture SSE4a instructions)
-* **SSE42** (Nehalem SSE4.2 functions)
-* **AVX** (AVX functions)
-* **AVX2** (AVX2 functions)
-* **FMA3** (Intel FMA 3)
-* **FMA4** (Bulldozer FMA4 functions)
-* **XOP** (Bulldozer XOP functions)
-* **F16C** (Half-precision floating-point conversion)
-* **BMI1** (Bit Manipulation Instruction Set 1)
-* **BMI2** (Bit Manipulation Instruction Set 2)
-* **TBM** (AMD Trailing Bit Manipulation)
-* **LZCNT** (LZCNT instruction)
-* **POPCNT** (POPCNT instruction)
-* **AESNI** (Advanced Encryption Standard New Instructions)
-* **CLMUL** (Carry-less Multiplication)
-* **HTT** (Hyperthreading (enabled))
-* **HLE** (Hardware Lock Elision)
-* **RTM** (Restricted Transactional Memory)
-* **RDRAND** (RDRAND instruction is available)
-* **RDSEED** (RDSEED instruction is available)
-* **ADX** (Intel ADX (Multi-Precision Add-Carry Instruction Extensions))
-* **SHA** (Intel SHA Extensions)
-* **AVX512F** (AVX-512 Foundation)
-* **AVX512DQ** (AVX-512 Doubleword and Quadword Instructions)
-* **AVX512IFMA** (AVX-512 Integer Fused Multiply-Add Instructions)
-* **AVX512PF** (AVX-512 Prefetch Instructions)
-* **AVX512ER** (AVX-512 Exponential and Reciprocal Instructions)
-* **AVX512CD** (AVX-512 Conflict Detection Instructions)
-* **AVX512BW** (AVX-512 Byte and Word Instructions)
-* **AVX512VL** (AVX-512 Vector Length Extensions)
-* **AVX512VBMI** (AVX-512 Vector Bit Manipulation Instructions)
-* **MPX** (Intel MPX (Memory Protection Extensions))
-* **ERMS** (Enhanced REP MOVSB/STOSB)
-* **RDTSCP** (RDTSCP Instruction)
-* **CX16** (CMPXCHG16B Instruction)
-* **SGX** (Software Guard Extensions, with activation details)
-
-## Performance
-* **RDTSCP()** Returns current cycle count. Can be used for benchmarking.
-* **SSE2SLOW** (SSE2 is supported, but usually not faster)
-* **SSE3SLOW** (SSE3 is supported, but usually not faster)
-* **ATOM** (Atom processor, some SSSE3 instructions are slower)
-* **Cache line** (Probable size of a cache line).
-* **L1, L2, L3 Cache size** on newer Intel/AMD CPUs.
-
-## Cpu Vendor/VM
-* **Intel**
-* **AMD**
-* **VIA**
-* **Transmeta**
-* **NSC**
-* **KVM** (Kernel-based Virtual Machine)
-* **MSVM** (Microsoft Hyper-V or Windows Virtual PC)
-* **VMware**
-* **XenHVM**
-* **Bhyve**
-* **Hygon**
-
-# installing
-
-```go get github.com/klauspost/cpuid```
-
-# example
-
-```Go
-package main
-
-import (
- "fmt"
- "github.com/klauspost/cpuid"
-)
-
-func main() {
- // Print basic CPU information:
- fmt.Println("Name:", cpuid.CPU.BrandName)
- fmt.Println("PhysicalCores:", cpuid.CPU.PhysicalCores)
- fmt.Println("ThreadsPerCore:", cpuid.CPU.ThreadsPerCore)
- fmt.Println("LogicalCores:", cpuid.CPU.LogicalCores)
- fmt.Println("Family", cpuid.CPU.Family, "Model:", cpuid.CPU.Model)
- fmt.Println("Features:", cpuid.CPU.Features)
- fmt.Println("Cacheline bytes:", cpuid.CPU.CacheLine)
- fmt.Println("L1 Data Cache:", cpuid.CPU.Cache.L1D, "bytes")
- fmt.Println("L1 Instruction Cache:", cpuid.CPU.Cache.L1D, "bytes")
- fmt.Println("L2 Cache:", cpuid.CPU.Cache.L2, "bytes")
- fmt.Println("L3 Cache:", cpuid.CPU.Cache.L3, "bytes")
-
- // Test if we have a specific feature:
- if cpuid.CPU.SSE() {
- fmt.Println("We have Streaming SIMD Extensions")
- }
-}
-```
-
-Sample output:
-```
->go run main.go
-Name: Intel(R) Core(TM) i5-2540M CPU @ 2.60GHz
-PhysicalCores: 2
-ThreadsPerCore: 2
-LogicalCores: 4
-Family 6 Model: 42
-Features: CMOV,MMX,MMXEXT,SSE,SSE2,SSE3,SSSE3,SSE4.1,SSE4.2,AVX,AESNI,CLMUL
-Cacheline bytes: 64
-We have Streaming SIMD Extensions
-```
-
-# private package
-
-In the "private" folder you can find an autogenerated version of the library you can include in your own packages.
-
-For this purpose all exports are removed, and functions and constants are lowercased.
-
-This is not a recommended way of using the library, but provided for convenience, if it is difficult for you to use external packages.
-
-# license
-
-This code is published under an MIT license. See LICENSE file for more information.
diff --git a/vendor/github.com/klauspost/cpuid/cpuid.go b/vendor/github.com/klauspost/cpuid/cpuid.go
deleted file mode 100644
index db9591321..000000000
--- a/vendor/github.com/klauspost/cpuid/cpuid.go
+++ /dev/null
@@ -1,1049 +0,0 @@
-// Copyright (c) 2015 Klaus Post, released under MIT License. See LICENSE file.
-
-// Package cpuid provides information about the CPU running the current program.
-//
-// CPU features are detected on startup, and kept for fast access through the life of the application.
-// Currently x86 / x64 (AMD64) is supported.
-//
-// You can access the CPU information by accessing the shared CPU variable of the cpuid library.
-//
-// Package home: https://github.com/klauspost/cpuid
-package cpuid
-
-import "strings"
-
-// Vendor is a representation of a CPU vendor.
-type Vendor int
-
-const (
- Other Vendor = iota
- Intel
- AMD
- VIA
- Transmeta
- NSC
- KVM // Kernel-based Virtual Machine
- MSVM // Microsoft Hyper-V or Windows Virtual PC
- VMware
- XenHVM
- Bhyve
- Hygon
-)
-
-const (
- CMOV = 1 << iota // i686 CMOV
- NX // NX (No-Execute) bit
- AMD3DNOW // AMD 3DNOW
- AMD3DNOWEXT // AMD 3DNowExt
- MMX // standard MMX
- MMXEXT // SSE integer functions or AMD MMX ext
- SSE // SSE functions
- SSE2 // P4 SSE functions
- SSE3 // Prescott SSE3 functions
- SSSE3 // Conroe SSSE3 functions
- SSE4 // Penryn SSE4.1 functions
- SSE4A // AMD Barcelona microarchitecture SSE4a instructions
- SSE42 // Nehalem SSE4.2 functions
- AVX // AVX functions
- AVX2 // AVX2 functions
- FMA3 // Intel FMA 3
- FMA4 // Bulldozer FMA4 functions
- XOP // Bulldozer XOP functions
- F16C // Half-precision floating-point conversion
- BMI1 // Bit Manipulation Instruction Set 1
- BMI2 // Bit Manipulation Instruction Set 2
- TBM // AMD Trailing Bit Manipulation
- LZCNT // LZCNT instruction
- POPCNT // POPCNT instruction
- AESNI // Advanced Encryption Standard New Instructions
- CLMUL // Carry-less Multiplication
- HTT // Hyperthreading (enabled)
- HLE // Hardware Lock Elision
- RTM // Restricted Transactional Memory
- RDRAND // RDRAND instruction is available
- RDSEED // RDSEED instruction is available
- ADX // Intel ADX (Multi-Precision Add-Carry Instruction Extensions)
- SHA // Intel SHA Extensions
- AVX512F // AVX-512 Foundation
- AVX512DQ // AVX-512 Doubleword and Quadword Instructions
- AVX512IFMA // AVX-512 Integer Fused Multiply-Add Instructions
- AVX512PF // AVX-512 Prefetch Instructions
- AVX512ER // AVX-512 Exponential and Reciprocal Instructions
- AVX512CD // AVX-512 Conflict Detection Instructions
- AVX512BW // AVX-512 Byte and Word Instructions
- AVX512VL // AVX-512 Vector Length Extensions
- AVX512VBMI // AVX-512 Vector Bit Manipulation Instructions
- MPX // Intel MPX (Memory Protection Extensions)
- ERMS // Enhanced REP MOVSB/STOSB
- RDTSCP // RDTSCP Instruction
- CX16 // CMPXCHG16B Instruction
- SGX // Software Guard Extensions
- IBPB // Indirect Branch Restricted Speculation (IBRS) and Indirect Branch Predictor Barrier (IBPB)
- STIBP // Single Thread Indirect Branch Predictors
-
- // Performance indicators
- SSE2SLOW // SSE2 is supported, but usually not faster
- SSE3SLOW // SSE3 is supported, but usually not faster
- ATOM // Atom processor, some SSSE3 instructions are slower
-)
-
-var flagNames = map[Flags]string{
- CMOV: "CMOV", // i686 CMOV
- NX: "NX", // NX (No-Execute) bit
- AMD3DNOW: "AMD3DNOW", // AMD 3DNOW
- AMD3DNOWEXT: "AMD3DNOWEXT", // AMD 3DNowExt
- MMX: "MMX", // Standard MMX
- MMXEXT: "MMXEXT", // SSE integer functions or AMD MMX ext
- SSE: "SSE", // SSE functions
- SSE2: "SSE2", // P4 SSE2 functions
- SSE3: "SSE3", // Prescott SSE3 functions
- SSSE3: "SSSE3", // Conroe SSSE3 functions
- SSE4: "SSE4.1", // Penryn SSE4.1 functions
- SSE4A: "SSE4A", // AMD Barcelona microarchitecture SSE4a instructions
- SSE42: "SSE4.2", // Nehalem SSE4.2 functions
- AVX: "AVX", // AVX functions
- AVX2: "AVX2", // AVX functions
- FMA3: "FMA3", // Intel FMA 3
- FMA4: "FMA4", // Bulldozer FMA4 functions
- XOP: "XOP", // Bulldozer XOP functions
- F16C: "F16C", // Half-precision floating-point conversion
- BMI1: "BMI1", // Bit Manipulation Instruction Set 1
- BMI2: "BMI2", // Bit Manipulation Instruction Set 2
- TBM: "TBM", // AMD Trailing Bit Manipulation
- LZCNT: "LZCNT", // LZCNT instruction
- POPCNT: "POPCNT", // POPCNT instruction
- AESNI: "AESNI", // Advanced Encryption Standard New Instructions
- CLMUL: "CLMUL", // Carry-less Multiplication
- HTT: "HTT", // Hyperthreading (enabled)
- HLE: "HLE", // Hardware Lock Elision
- RTM: "RTM", // Restricted Transactional Memory
- RDRAND: "RDRAND", // RDRAND instruction is available
- RDSEED: "RDSEED", // RDSEED instruction is available
- ADX: "ADX", // Intel ADX (Multi-Precision Add-Carry Instruction Extensions)
- SHA: "SHA", // Intel SHA Extensions
- AVX512F: "AVX512F", // AVX-512 Foundation
- AVX512DQ: "AVX512DQ", // AVX-512 Doubleword and Quadword Instructions
- AVX512IFMA: "AVX512IFMA", // AVX-512 Integer Fused Multiply-Add Instructions
- AVX512PF: "AVX512PF", // AVX-512 Prefetch Instructions
- AVX512ER: "AVX512ER", // AVX-512 Exponential and Reciprocal Instructions
- AVX512CD: "AVX512CD", // AVX-512 Conflict Detection Instructions
- AVX512BW: "AVX512BW", // AVX-512 Byte and Word Instructions
- AVX512VL: "AVX512VL", // AVX-512 Vector Length Extensions
- AVX512VBMI: "AVX512VBMI", // AVX-512 Vector Bit Manipulation Instructions
- MPX: "MPX", // Intel MPX (Memory Protection Extensions)
- ERMS: "ERMS", // Enhanced REP MOVSB/STOSB
- RDTSCP: "RDTSCP", // RDTSCP Instruction
- CX16: "CX16", // CMPXCHG16B Instruction
- SGX: "SGX", // Software Guard Extensions
- IBPB: "IBPB", // Indirect Branch Restricted Speculation and Indirect Branch Predictor Barrier
- STIBP: "STIBP", // Single Thread Indirect Branch Predictors
-
- // Performance indicators
- SSE2SLOW: "SSE2SLOW", // SSE2 supported, but usually not faster
- SSE3SLOW: "SSE3SLOW", // SSE3 supported, but usually not faster
- ATOM: "ATOM", // Atom processor, some SSSE3 instructions are slower
-
-}
-
-// CPUInfo contains information about the detected system CPU.
-type CPUInfo struct {
- BrandName string // Brand name reported by the CPU
- VendorID Vendor // Comparable CPU vendor ID
- Features Flags // Features of the CPU
- PhysicalCores int // Number of physical processor cores in your CPU. Will be 0 if undetectable.
- ThreadsPerCore int // Number of threads per physical core. Will be 1 if undetectable.
- LogicalCores int // Number of physical cores times threads that can run on each core through the use of hyperthreading. Will be 0 if undetectable.
- Family int // CPU family number
- Model int // CPU model number
- CacheLine int // Cache line size in bytes. Will be 0 if undetectable.
- Cache struct {
- L1I int // L1 Instruction Cache (per core or shared). Will be -1 if undetected
- L1D int // L1 Data Cache (per core or shared). Will be -1 if undetected
- L2 int // L2 Cache (per core or shared). Will be -1 if undetected
- L3 int // L3 Instruction Cache (per core or shared). Will be -1 if undetected
- }
- SGX SGXSupport
- maxFunc uint32
- maxExFunc uint32
-}
-
-var cpuid func(op uint32) (eax, ebx, ecx, edx uint32)
-var cpuidex func(op, op2 uint32) (eax, ebx, ecx, edx uint32)
-var xgetbv func(index uint32) (eax, edx uint32)
-var rdtscpAsm func() (eax, ebx, ecx, edx uint32)
-
-// CPU contains information about the CPU as detected on startup,
-// or when Detect last was called.
-//
-// Use this as the primary entry point to you data,
-// this way queries are
-var CPU CPUInfo
-
-func init() {
- initCPU()
- Detect()
-}
-
-// Detect will re-detect current CPU info.
-// This will replace the content of the exported CPU variable.
-//
-// Unless you expect the CPU to change while you are running your program
-// you should not need to call this function.
-// If you call this, you must ensure that no other goroutine is accessing the
-// exported CPU variable.
-func Detect() {
- CPU.maxFunc = maxFunctionID()
- CPU.maxExFunc = maxExtendedFunction()
- CPU.BrandName = brandName()
- CPU.CacheLine = cacheLine()
- CPU.Family, CPU.Model = familyModel()
- CPU.Features = support()
- CPU.SGX = hasSGX(CPU.Features&SGX != 0)
- CPU.ThreadsPerCore = threadsPerCore()
- CPU.LogicalCores = logicalCores()
- CPU.PhysicalCores = physicalCores()
- CPU.VendorID = vendorID()
- CPU.cacheSize()
-}
-
-// Generated here: http://play.golang.org/p/BxFH2Gdc0G
-
-// Cmov indicates support of CMOV instructions
-func (c CPUInfo) Cmov() bool {
- return c.Features&CMOV != 0
-}
-
-// Amd3dnow indicates support of AMD 3DNOW! instructions
-func (c CPUInfo) Amd3dnow() bool {
- return c.Features&AMD3DNOW != 0
-}
-
-// Amd3dnowExt indicates support of AMD 3DNOW! Extended instructions
-func (c CPUInfo) Amd3dnowExt() bool {
- return c.Features&AMD3DNOWEXT != 0
-}
-
-// MMX indicates support of MMX instructions
-func (c CPUInfo) MMX() bool {
- return c.Features&MMX != 0
-}
-
-// MMXExt indicates support of MMXEXT instructions
-// (SSE integer functions or AMD MMX ext)
-func (c CPUInfo) MMXExt() bool {
- return c.Features&MMXEXT != 0
-}
-
-// SSE indicates support of SSE instructions
-func (c CPUInfo) SSE() bool {
- return c.Features&SSE != 0
-}
-
-// SSE2 indicates support of SSE 2 instructions
-func (c CPUInfo) SSE2() bool {
- return c.Features&SSE2 != 0
-}
-
-// SSE3 indicates support of SSE 3 instructions
-func (c CPUInfo) SSE3() bool {
- return c.Features&SSE3 != 0
-}
-
-// SSSE3 indicates support of SSSE 3 instructions
-func (c CPUInfo) SSSE3() bool {
- return c.Features&SSSE3 != 0
-}
-
-// SSE4 indicates support of SSE 4 (also called SSE 4.1) instructions
-func (c CPUInfo) SSE4() bool {
- return c.Features&SSE4 != 0
-}
-
-// SSE42 indicates support of SSE4.2 instructions
-func (c CPUInfo) SSE42() bool {
- return c.Features&SSE42 != 0
-}
-
-// AVX indicates support of AVX instructions
-// and operating system support of AVX instructions
-func (c CPUInfo) AVX() bool {
- return c.Features&AVX != 0
-}
-
-// AVX2 indicates support of AVX2 instructions
-func (c CPUInfo) AVX2() bool {
- return c.Features&AVX2 != 0
-}
-
-// FMA3 indicates support of FMA3 instructions
-func (c CPUInfo) FMA3() bool {
- return c.Features&FMA3 != 0
-}
-
-// FMA4 indicates support of FMA4 instructions
-func (c CPUInfo) FMA4() bool {
- return c.Features&FMA4 != 0
-}
-
-// XOP indicates support of XOP instructions
-func (c CPUInfo) XOP() bool {
- return c.Features&XOP != 0
-}
-
-// F16C indicates support of F16C instructions
-func (c CPUInfo) F16C() bool {
- return c.Features&F16C != 0
-}
-
-// BMI1 indicates support of BMI1 instructions
-func (c CPUInfo) BMI1() bool {
- return c.Features&BMI1 != 0
-}
-
-// BMI2 indicates support of BMI2 instructions
-func (c CPUInfo) BMI2() bool {
- return c.Features&BMI2 != 0
-}
-
-// TBM indicates support of TBM instructions
-// (AMD Trailing Bit Manipulation)
-func (c CPUInfo) TBM() bool {
- return c.Features&TBM != 0
-}
-
-// Lzcnt indicates support of LZCNT instruction
-func (c CPUInfo) Lzcnt() bool {
- return c.Features&LZCNT != 0
-}
-
-// Popcnt indicates support of POPCNT instruction
-func (c CPUInfo) Popcnt() bool {
- return c.Features&POPCNT != 0
-}
-
-// HTT indicates the processor has Hyperthreading enabled
-func (c CPUInfo) HTT() bool {
- return c.Features&HTT != 0
-}
-
-// SSE2Slow indicates that SSE2 may be slow on this processor
-func (c CPUInfo) SSE2Slow() bool {
- return c.Features&SSE2SLOW != 0
-}
-
-// SSE3Slow indicates that SSE3 may be slow on this processor
-func (c CPUInfo) SSE3Slow() bool {
- return c.Features&SSE3SLOW != 0
-}
-
-// AesNi indicates support of AES-NI instructions
-// (Advanced Encryption Standard New Instructions)
-func (c CPUInfo) AesNi() bool {
- return c.Features&AESNI != 0
-}
-
-// Clmul indicates support of CLMUL instructions
-// (Carry-less Multiplication)
-func (c CPUInfo) Clmul() bool {
- return c.Features&CLMUL != 0
-}
-
-// NX indicates support of NX (No-Execute) bit
-func (c CPUInfo) NX() bool {
- return c.Features&NX != 0
-}
-
-// SSE4A indicates support of AMD Barcelona microarchitecture SSE4a instructions
-func (c CPUInfo) SSE4A() bool {
- return c.Features&SSE4A != 0
-}
-
-// HLE indicates support of Hardware Lock Elision
-func (c CPUInfo) HLE() bool {
- return c.Features&HLE != 0
-}
-
-// RTM indicates support of Restricted Transactional Memory
-func (c CPUInfo) RTM() bool {
- return c.Features&RTM != 0
-}
-
-// Rdrand indicates support of RDRAND instruction is available
-func (c CPUInfo) Rdrand() bool {
- return c.Features&RDRAND != 0
-}
-
-// Rdseed indicates support of RDSEED instruction is available
-func (c CPUInfo) Rdseed() bool {
- return c.Features&RDSEED != 0
-}
-
-// ADX indicates support of Intel ADX (Multi-Precision Add-Carry Instruction Extensions)
-func (c CPUInfo) ADX() bool {
- return c.Features&ADX != 0
-}
-
-// SHA indicates support of Intel SHA Extensions
-func (c CPUInfo) SHA() bool {
- return c.Features&SHA != 0
-}
-
-// AVX512F indicates support of AVX-512 Foundation
-func (c CPUInfo) AVX512F() bool {
- return c.Features&AVX512F != 0
-}
-
-// AVX512DQ indicates support of AVX-512 Doubleword and Quadword Instructions
-func (c CPUInfo) AVX512DQ() bool {
- return c.Features&AVX512DQ != 0
-}
-
-// AVX512IFMA indicates support of AVX-512 Integer Fused Multiply-Add Instructions
-func (c CPUInfo) AVX512IFMA() bool {
- return c.Features&AVX512IFMA != 0
-}
-
-// AVX512PF indicates support of AVX-512 Prefetch Instructions
-func (c CPUInfo) AVX512PF() bool {
- return c.Features&AVX512PF != 0
-}
-
-// AVX512ER indicates support of AVX-512 Exponential and Reciprocal Instructions
-func (c CPUInfo) AVX512ER() bool {
- return c.Features&AVX512ER != 0
-}
-
-// AVX512CD indicates support of AVX-512 Conflict Detection Instructions
-func (c CPUInfo) AVX512CD() bool {
- return c.Features&AVX512CD != 0
-}
-
-// AVX512BW indicates support of AVX-512 Byte and Word Instructions
-func (c CPUInfo) AVX512BW() bool {
- return c.Features&AVX512BW != 0
-}
-
-// AVX512VL indicates support of AVX-512 Vector Length Extensions
-func (c CPUInfo) AVX512VL() bool {
- return c.Features&AVX512VL != 0
-}
-
-// AVX512VBMI indicates support of AVX-512 Vector Bit Manipulation Instructions
-func (c CPUInfo) AVX512VBMI() bool {
- return c.Features&AVX512VBMI != 0
-}
-
-// MPX indicates support of Intel MPX (Memory Protection Extensions)
-func (c CPUInfo) MPX() bool {
- return c.Features&MPX != 0
-}
-
-// ERMS indicates support of Enhanced REP MOVSB/STOSB
-func (c CPUInfo) ERMS() bool {
- return c.Features&ERMS != 0
-}
-
-// RDTSCP Instruction is available.
-func (c CPUInfo) RDTSCP() bool {
- return c.Features&RDTSCP != 0
-}
-
-// CX16 indicates if CMPXCHG16B instruction is available.
-func (c CPUInfo) CX16() bool {
- return c.Features&CX16 != 0
-}
-
-// TSX is split into HLE (Hardware Lock Elision) and RTM (Restricted Transactional Memory) detection.
-// So TSX simply checks that.
-func (c CPUInfo) TSX() bool {
- return c.Features&(HLE|RTM) == HLE|RTM
-}
-
-// Atom indicates an Atom processor
-func (c CPUInfo) Atom() bool {
- return c.Features&ATOM != 0
-}
-
-// Intel returns true if vendor is recognized as Intel
-func (c CPUInfo) Intel() bool {
- return c.VendorID == Intel
-}
-
-// AMD returns true if vendor is recognized as AMD
-func (c CPUInfo) AMD() bool {
- return c.VendorID == AMD
-}
-
-// Hygon returns true if vendor is recognized as Hygon
-func (c CPUInfo) Hygon() bool {
- return c.VendorID == Hygon
-}
-
-// Transmeta returns true if vendor is recognized as Transmeta
-func (c CPUInfo) Transmeta() bool {
- return c.VendorID == Transmeta
-}
-
-// NSC returns true if vendor is recognized as National Semiconductor
-func (c CPUInfo) NSC() bool {
- return c.VendorID == NSC
-}
-
-// VIA returns true if vendor is recognized as VIA
-func (c CPUInfo) VIA() bool {
- return c.VendorID == VIA
-}
-
-// RTCounter returns the 64-bit time-stamp counter
-// Uses the RDTSCP instruction. The value 0 is returned
-// if the CPU does not support the instruction.
-func (c CPUInfo) RTCounter() uint64 {
- if !c.RDTSCP() {
- return 0
- }
- a, _, _, d := rdtscpAsm()
- return uint64(a) | (uint64(d) << 32)
-}
-
-// Ia32TscAux returns the IA32_TSC_AUX part of the RDTSCP.
-// This variable is OS dependent, but on Linux contains information
-// about the current cpu/core the code is running on.
-// If the RDTSCP instruction isn't supported on the CPU, the value 0 is returned.
-func (c CPUInfo) Ia32TscAux() uint32 {
- if !c.RDTSCP() {
- return 0
- }
- _, _, ecx, _ := rdtscpAsm()
- return ecx
-}
-
-// LogicalCPU will return the Logical CPU the code is currently executing on.
-// This is likely to change when the OS re-schedules the running thread
-// to another CPU.
-// If the current core cannot be detected, -1 will be returned.
-func (c CPUInfo) LogicalCPU() int {
- if c.maxFunc < 1 {
- return -1
- }
- _, ebx, _, _ := cpuid(1)
- return int(ebx >> 24)
-}
-
-// VM Will return true if the cpu id indicates we are in
-// a virtual machine. This is only a hint, and will very likely
-// have many false negatives.
-func (c CPUInfo) VM() bool {
- switch c.VendorID {
- case MSVM, KVM, VMware, XenHVM, Bhyve:
- return true
- }
- return false
-}
-
-// Flags contains detected cpu features and caracteristics
-type Flags uint64
-
-// String returns a string representation of the detected
-// CPU features.
-func (f Flags) String() string {
- return strings.Join(f.Strings(), ",")
-}
-
-// Strings returns and array of the detected features.
-func (f Flags) Strings() []string {
- s := support()
- r := make([]string, 0, 20)
- for i := uint(0); i < 64; i++ {
- key := Flags(1 << i)
- val := flagNames[key]
- if s&key != 0 {
- r = append(r, val)
- }
- }
- return r
-}
-
-func maxExtendedFunction() uint32 {
- eax, _, _, _ := cpuid(0x80000000)
- return eax
-}
-
-func maxFunctionID() uint32 {
- a, _, _, _ := cpuid(0)
- return a
-}
-
-func brandName() string {
- if maxExtendedFunction() >= 0x80000004 {
- v := make([]uint32, 0, 48)
- for i := uint32(0); i < 3; i++ {
- a, b, c, d := cpuid(0x80000002 + i)
- v = append(v, a, b, c, d)
- }
- return strings.Trim(string(valAsString(v...)), " ")
- }
- return "unknown"
-}
-
-func threadsPerCore() int {
- mfi := maxFunctionID()
- if mfi < 0x4 || vendorID() != Intel {
- return 1
- }
-
- if mfi < 0xb {
- _, b, _, d := cpuid(1)
- if (d & (1 << 28)) != 0 {
- // v will contain logical core count
- v := (b >> 16) & 255
- if v > 1 {
- a4, _, _, _ := cpuid(4)
- // physical cores
- v2 := (a4 >> 26) + 1
- if v2 > 0 {
- return int(v) / int(v2)
- }
- }
- }
- return 1
- }
- _, b, _, _ := cpuidex(0xb, 0)
- if b&0xffff == 0 {
- return 1
- }
- return int(b & 0xffff)
-}
-
-func logicalCores() int {
- mfi := maxFunctionID()
- switch vendorID() {
- case Intel:
- // Use this on old Intel processors
- if mfi < 0xb {
- if mfi < 1 {
- return 0
- }
- // CPUID.1:EBX[23:16] represents the maximum number of addressable IDs (initial APIC ID)
- // that can be assigned to logical processors in a physical package.
- // The value may not be the same as the number of logical processors that are present in the hardware of a physical package.
- _, ebx, _, _ := cpuid(1)
- logical := (ebx >> 16) & 0xff
- return int(logical)
- }
- _, b, _, _ := cpuidex(0xb, 1)
- return int(b & 0xffff)
- case AMD, Hygon:
- _, b, _, _ := cpuid(1)
- return int((b >> 16) & 0xff)
- default:
- return 0
- }
-}
-
-func familyModel() (int, int) {
- if maxFunctionID() < 0x1 {
- return 0, 0
- }
- eax, _, _, _ := cpuid(1)
- family := ((eax >> 8) & 0xf) + ((eax >> 20) & 0xff)
- model := ((eax >> 4) & 0xf) + ((eax >> 12) & 0xf0)
- return int(family), int(model)
-}
-
-func physicalCores() int {
- switch vendorID() {
- case Intel:
- return logicalCores() / threadsPerCore()
- case AMD, Hygon:
- if maxExtendedFunction() >= 0x80000008 {
- _, _, c, _ := cpuid(0x80000008)
- return int(c&0xff) + 1
- }
- }
- return 0
-}
-
-// Except from http://en.wikipedia.org/wiki/CPUID#EAX.3D0:_Get_vendor_ID
-var vendorMapping = map[string]Vendor{
- "AMDisbetter!": AMD,
- "AuthenticAMD": AMD,
- "CentaurHauls": VIA,
- "GenuineIntel": Intel,
- "TransmetaCPU": Transmeta,
- "GenuineTMx86": Transmeta,
- "Geode by NSC": NSC,
- "VIA VIA VIA ": VIA,
- "KVMKVMKVMKVM": KVM,
- "Microsoft Hv": MSVM,
- "VMwareVMware": VMware,
- "XenVMMXenVMM": XenHVM,
- "bhyve bhyve ": Bhyve,
- "HygonGenuine": Hygon,
-}
-
-func vendorID() Vendor {
- _, b, c, d := cpuid(0)
- v := valAsString(b, d, c)
- vend, ok := vendorMapping[string(v)]
- if !ok {
- return Other
- }
- return vend
-}
-
-func cacheLine() int {
- if maxFunctionID() < 0x1 {
- return 0
- }
-
- _, ebx, _, _ := cpuid(1)
- cache := (ebx & 0xff00) >> 5 // cflush size
- if cache == 0 && maxExtendedFunction() >= 0x80000006 {
- _, _, ecx, _ := cpuid(0x80000006)
- cache = ecx & 0xff // cacheline size
- }
- // TODO: Read from Cache and TLB Information
- return int(cache)
-}
-
-func (c *CPUInfo) cacheSize() {
- c.Cache.L1D = -1
- c.Cache.L1I = -1
- c.Cache.L2 = -1
- c.Cache.L3 = -1
- vendor := vendorID()
- switch vendor {
- case Intel:
- if maxFunctionID() < 4 {
- return
- }
- for i := uint32(0); ; i++ {
- eax, ebx, ecx, _ := cpuidex(4, i)
- cacheType := eax & 15
- if cacheType == 0 {
- break
- }
- cacheLevel := (eax >> 5) & 7
- coherency := int(ebx&0xfff) + 1
- partitions := int((ebx>>12)&0x3ff) + 1
- associativity := int((ebx>>22)&0x3ff) + 1
- sets := int(ecx) + 1
- size := associativity * partitions * coherency * sets
- switch cacheLevel {
- case 1:
- if cacheType == 1 {
- // 1 = Data Cache
- c.Cache.L1D = size
- } else if cacheType == 2 {
- // 2 = Instruction Cache
- c.Cache.L1I = size
- } else {
- if c.Cache.L1D < 0 {
- c.Cache.L1I = size
- }
- if c.Cache.L1I < 0 {
- c.Cache.L1I = size
- }
- }
- case 2:
- c.Cache.L2 = size
- case 3:
- c.Cache.L3 = size
- }
- }
- case AMD, Hygon:
- // Untested.
- if maxExtendedFunction() < 0x80000005 {
- return
- }
- _, _, ecx, edx := cpuid(0x80000005)
- c.Cache.L1D = int(((ecx >> 24) & 0xFF) * 1024)
- c.Cache.L1I = int(((edx >> 24) & 0xFF) * 1024)
-
- if maxExtendedFunction() < 0x80000006 {
- return
- }
- _, _, ecx, _ = cpuid(0x80000006)
- c.Cache.L2 = int(((ecx >> 16) & 0xFFFF) * 1024)
- }
-
- return
-}
-
-type SGXSupport struct {
- Available bool
- SGX1Supported bool
- SGX2Supported bool
- MaxEnclaveSizeNot64 int64
- MaxEnclaveSize64 int64
-}
-
-func hasSGX(available bool) (rval SGXSupport) {
- rval.Available = available
-
- if !available {
- return
- }
-
- a, _, _, d := cpuidex(0x12, 0)
- rval.SGX1Supported = a&0x01 != 0
- rval.SGX2Supported = a&0x02 != 0
- rval.MaxEnclaveSizeNot64 = 1 << (d & 0xFF) // pow 2
- rval.MaxEnclaveSize64 = 1 << ((d >> 8) & 0xFF) // pow 2
-
- return
-}
-
-func support() Flags {
- mfi := maxFunctionID()
- vend := vendorID()
- if mfi < 0x1 {
- return 0
- }
- rval := uint64(0)
- _, _, c, d := cpuid(1)
- if (d & (1 << 15)) != 0 {
- rval |= CMOV
- }
- if (d & (1 << 23)) != 0 {
- rval |= MMX
- }
- if (d & (1 << 25)) != 0 {
- rval |= MMXEXT
- }
- if (d & (1 << 25)) != 0 {
- rval |= SSE
- }
- if (d & (1 << 26)) != 0 {
- rval |= SSE2
- }
- if (c & 1) != 0 {
- rval |= SSE3
- }
- if (c & 0x00000200) != 0 {
- rval |= SSSE3
- }
- if (c & 0x00080000) != 0 {
- rval |= SSE4
- }
- if (c & 0x00100000) != 0 {
- rval |= SSE42
- }
- if (c & (1 << 25)) != 0 {
- rval |= AESNI
- }
- if (c & (1 << 1)) != 0 {
- rval |= CLMUL
- }
- if c&(1<<23) != 0 {
- rval |= POPCNT
- }
- if c&(1<<30) != 0 {
- rval |= RDRAND
- }
- if c&(1<<29) != 0 {
- rval |= F16C
- }
- if c&(1<<13) != 0 {
- rval |= CX16
- }
- if vend == Intel && (d&(1<<28)) != 0 && mfi >= 4 {
- if threadsPerCore() > 1 {
- rval |= HTT
- }
- }
-
- // Check XGETBV, OXSAVE and AVX bits
- if c&(1<<26) != 0 && c&(1<<27) != 0 && c&(1<<28) != 0 {
- // Check for OS support
- eax, _ := xgetbv(0)
- if (eax & 0x6) == 0x6 {
- rval |= AVX
- if (c & 0x00001000) != 0 {
- rval |= FMA3
- }
- }
- }
-
- // Check AVX2, AVX2 requires OS support, but BMI1/2 don't.
- if mfi >= 7 {
- _, ebx, ecx, edx := cpuidex(7, 0)
- if (rval&AVX) != 0 && (ebx&0x00000020) != 0 {
- rval |= AVX2
- }
- if (ebx & 0x00000008) != 0 {
- rval |= BMI1
- if (ebx & 0x00000100) != 0 {
- rval |= BMI2
- }
- }
- if ebx&(1<<2) != 0 {
- rval |= SGX
- }
- if ebx&(1<<4) != 0 {
- rval |= HLE
- }
- if ebx&(1<<9) != 0 {
- rval |= ERMS
- }
- if ebx&(1<<11) != 0 {
- rval |= RTM
- }
- if ebx&(1<<14) != 0 {
- rval |= MPX
- }
- if ebx&(1<<18) != 0 {
- rval |= RDSEED
- }
- if ebx&(1<<19) != 0 {
- rval |= ADX
- }
- if ebx&(1<<29) != 0 {
- rval |= SHA
- }
- if edx&(1<<26) != 0 {
- rval |= IBPB
- }
- if edx&(1<<27) != 0 {
- rval |= STIBP
- }
-
- // Only detect AVX-512 features if XGETBV is supported
- if c&((1<<26)|(1<<27)) == (1<<26)|(1<<27) {
- // Check for OS support
- eax, _ := xgetbv(0)
-
- // Verify that XCR0[7:5] = ‘111b’ (OPMASK state, upper 256-bit of ZMM0-ZMM15 and
- // ZMM16-ZMM31 state are enabled by OS)
- // and that XCR0[2:1] = ‘11b’ (XMM state and YMM state are enabled by OS).
- if (eax>>5)&7 == 7 && (eax>>1)&3 == 3 {
- if ebx&(1<<16) != 0 {
- rval |= AVX512F
- }
- if ebx&(1<<17) != 0 {
- rval |= AVX512DQ
- }
- if ebx&(1<<21) != 0 {
- rval |= AVX512IFMA
- }
- if ebx&(1<<26) != 0 {
- rval |= AVX512PF
- }
- if ebx&(1<<27) != 0 {
- rval |= AVX512ER
- }
- if ebx&(1<<28) != 0 {
- rval |= AVX512CD
- }
- if ebx&(1<<30) != 0 {
- rval |= AVX512BW
- }
- if ebx&(1<<31) != 0 {
- rval |= AVX512VL
- }
- // ecx
- if ecx&(1<<1) != 0 {
- rval |= AVX512VBMI
- }
- }
- }
- }
-
- if maxExtendedFunction() >= 0x80000001 {
- _, _, c, d := cpuid(0x80000001)
- if (c & (1 << 5)) != 0 {
- rval |= LZCNT
- rval |= POPCNT
- }
- if (d & (1 << 31)) != 0 {
- rval |= AMD3DNOW
- }
- if (d & (1 << 30)) != 0 {
- rval |= AMD3DNOWEXT
- }
- if (d & (1 << 23)) != 0 {
- rval |= MMX
- }
- if (d & (1 << 22)) != 0 {
- rval |= MMXEXT
- }
- if (c & (1 << 6)) != 0 {
- rval |= SSE4A
- }
- if d&(1<<20) != 0 {
- rval |= NX
- }
- if d&(1<<27) != 0 {
- rval |= RDTSCP
- }
-
- /* Allow for selectively disabling SSE2 functions on AMD processors
- with SSE2 support but not SSE4a. This includes Athlon64, some
- Opteron, and some Sempron processors. MMX, SSE, or 3DNow! are faster
- than SSE2 often enough to utilize this special-case flag.
- AV_CPU_FLAG_SSE2 and AV_CPU_FLAG_SSE2SLOW are both set in this case
- so that SSE2 is used unless explicitly disabled by checking
- AV_CPU_FLAG_SSE2SLOW. */
- if vendorID() != Intel &&
- rval&SSE2 != 0 && (c&0x00000040) == 0 {
- rval |= SSE2SLOW
- }
-
- /* XOP and FMA4 use the AVX instruction coding scheme, so they can't be
- * used unless the OS has AVX support. */
- if (rval & AVX) != 0 {
- if (c & 0x00000800) != 0 {
- rval |= XOP
- }
- if (c & 0x00010000) != 0 {
- rval |= FMA4
- }
- }
-
- if vendorID() == Intel {
- family, model := familyModel()
- if family == 6 && (model == 9 || model == 13 || model == 14) {
- /* 6/9 (pentium-m "banias"), 6/13 (pentium-m "dothan"), and
- * 6/14 (core1 "yonah") theoretically support sse2, but it's
- * usually slower than mmx. */
- if (rval & SSE2) != 0 {
- rval |= SSE2SLOW
- }
- if (rval & SSE3) != 0 {
- rval |= SSE3SLOW
- }
- }
- /* The Atom processor has SSSE3 support, which is useful in many cases,
- * but sometimes the SSSE3 version is slower than the SSE2 equivalent
- * on the Atom, but is generally faster on other processors supporting
- * SSSE3. This flag allows for selectively disabling certain SSSE3
- * functions on the Atom. */
- if family == 6 && model == 28 {
- rval |= ATOM
- }
- }
- }
- return Flags(rval)
-}
-
-func valAsString(values ...uint32) []byte {
- r := make([]byte, 4*len(values))
- for i, v := range values {
- dst := r[i*4:]
- dst[0] = byte(v & 0xff)
- dst[1] = byte((v >> 8) & 0xff)
- dst[2] = byte((v >> 16) & 0xff)
- dst[3] = byte((v >> 24) & 0xff)
- switch {
- case dst[0] == 0:
- return r[:i*4]
- case dst[1] == 0:
- return r[:i*4+1]
- case dst[2] == 0:
- return r[:i*4+2]
- case dst[3] == 0:
- return r[:i*4+3]
- }
- }
- return r
-}
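For reference, the arithmetic the deleted cacheSize performed for Intel CPUs: CPUID leaf 4 describes each cache level with its ways, partitions, line size and set count all stored minus one, and their product is the size in bytes. A self-contained sketch of that computation (cacheSizeFromLeaf4 is an illustrative helper, not part of the package):

    package main

    import "fmt"

    // cacheSizeFromLeaf4 decodes one CPUID leaf-4 subleaf the way the
    // deleted cacheSize did: every field is stored minus one.
    func cacheSizeFromLeaf4(eax, ebx, ecx uint32) (level uint32, bytes int) {
        level = (eax >> 5) & 7
        lineSize := int(ebx&0xfff) + 1         // bits 11:0
        partitions := int((ebx>>12)&0x3ff) + 1 // bits 21:12
        ways := int((ebx>>22)&0x3ff) + 1       // bits 31:22
        sets := int(ecx) + 1
        return level, ways * partitions * lineSize * sets
    }

    func main() {
        // A 32 KiB, 8-way L1 data cache with 64-byte lines:
        // ways-1=7, partitions-1=0, line-1=63, sets-1=63 -> 8*1*64*64.
        level, size := cacheSizeFromLeaf4(1<<5, 7<<22|63, 63)
        fmt.Printf("L%d cache: %d bytes\n", level, size)
    }
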
diff --git a/vendor/github.com/klauspost/cpuid/cpuid_386.s b/vendor/github.com/klauspost/cpuid/cpuid_386.s
deleted file mode 100644
index 4d731711e..000000000
--- a/vendor/github.com/klauspost/cpuid/cpuid_386.s
+++ /dev/null
@@ -1,42 +0,0 @@
-// Copyright (c) 2015 Klaus Post, released under MIT License. See LICENSE file.
-
-// +build 386,!gccgo
-
-// func asmCpuid(op uint32) (eax, ebx, ecx, edx uint32)
-TEXT ·asmCpuid(SB), 7, $0
- XORL CX, CX
- MOVL op+0(FP), AX
- CPUID
- MOVL AX, eax+4(FP)
- MOVL BX, ebx+8(FP)
- MOVL CX, ecx+12(FP)
- MOVL DX, edx+16(FP)
- RET
-
-// func asmCpuidex(op, op2 uint32) (eax, ebx, ecx, edx uint32)
-TEXT ·asmCpuidex(SB), 7, $0
- MOVL op+0(FP), AX
- MOVL op2+4(FP), CX
- CPUID
- MOVL AX, eax+8(FP)
- MOVL BX, ebx+12(FP)
- MOVL CX, ecx+16(FP)
- MOVL DX, edx+20(FP)
- RET
-
-// func asmXgetbv(index uint32) (eax, edx uint32)
-TEXT ·asmXgetbv(SB), 7, $0
- MOVL index+0(FP), CX
- BYTE $0x0f; BYTE $0x01; BYTE $0xd0 // XGETBV
- MOVL AX, eax+4(FP)
- MOVL DX, edx+8(FP)
- RET
-
-// func asmRdtscpAsm() (eax, ebx, ecx, edx uint32)
-TEXT ·asmRdtscpAsm(SB), 7, $0
- BYTE $0x0F; BYTE $0x01; BYTE $0xF9 // RDTSCP
- MOVL AX, eax+0(FP)
- MOVL BX, ebx+4(FP)
- MOVL CX, ecx+8(FP)
- MOVL DX, edx+12(FP)
- RET
diff --git a/vendor/github.com/klauspost/cpuid/cpuid_amd64.s b/vendor/github.com/klauspost/cpuid/cpuid_amd64.s
deleted file mode 100644
index 3c1d60e42..000000000
--- a/vendor/github.com/klauspost/cpuid/cpuid_amd64.s
+++ /dev/null
@@ -1,42 +0,0 @@
-// Copyright (c) 2015 Klaus Post, released under MIT License. See LICENSE file.
-
-//+build amd64,!gccgo
-
-// func asmCpuid(op uint32) (eax, ebx, ecx, edx uint32)
-TEXT ·asmCpuid(SB), 7, $0
- XORQ CX, CX
- MOVL op+0(FP), AX
- CPUID
- MOVL AX, eax+8(FP)
- MOVL BX, ebx+12(FP)
- MOVL CX, ecx+16(FP)
- MOVL DX, edx+20(FP)
- RET
-
-// func asmCpuidex(op, op2 uint32) (eax, ebx, ecx, edx uint32)
-TEXT ·asmCpuidex(SB), 7, $0
- MOVL op+0(FP), AX
- MOVL op2+4(FP), CX
- CPUID
- MOVL AX, eax+8(FP)
- MOVL BX, ebx+12(FP)
- MOVL CX, ecx+16(FP)
- MOVL DX, edx+20(FP)
- RET
-
-// func asmXgetbv(index uint32) (eax, edx uint32)
-TEXT ·asmXgetbv(SB), 7, $0
- MOVL index+0(FP), CX
- BYTE $0x0f; BYTE $0x01; BYTE $0xd0 // XGETBV
- MOVL AX, eax+8(FP)
- MOVL DX, edx+12(FP)
- RET
-
-// func asmRdtscpAsm() (eax, ebx, ecx, edx uint32)
-TEXT ·asmRdtscpAsm(SB), 7, $0
- BYTE $0x0F; BYTE $0x01; BYTE $0xF9 // RDTSCP
- MOVL AX, eax+0(FP)
- MOVL BX, ebx+4(FP)
- MOVL CX, ecx+8(FP)
- MOVL DX, edx+12(FP)
- RET
diff --git a/vendor/github.com/klauspost/cpuid/detect_intel.go b/vendor/github.com/klauspost/cpuid/detect_intel.go
deleted file mode 100644
index a5f04dd6d..000000000
--- a/vendor/github.com/klauspost/cpuid/detect_intel.go
+++ /dev/null
@@ -1,17 +0,0 @@
-// Copyright (c) 2015 Klaus Post, released under MIT License. See LICENSE file.
-
-// +build 386,!gccgo amd64,!gccgo
-
-package cpuid
-
-func asmCpuid(op uint32) (eax, ebx, ecx, edx uint32)
-func asmCpuidex(op, op2 uint32) (eax, ebx, ecx, edx uint32)
-func asmXgetbv(index uint32) (eax, edx uint32)
-func asmRdtscpAsm() (eax, ebx, ecx, edx uint32)
-
-func initCPU() {
- cpuid = asmCpuid
- cpuidex = asmCpuidex
- xgetbv = asmXgetbv
- rdtscpAsm = asmRdtscpAsm
-}
diff --git a/vendor/github.com/klauspost/cpuid/detect_ref.go b/vendor/github.com/klauspost/cpuid/detect_ref.go
deleted file mode 100644
index 909c5d9a7..000000000
--- a/vendor/github.com/klauspost/cpuid/detect_ref.go
+++ /dev/null
@@ -1,23 +0,0 @@
-// Copyright (c) 2015 Klaus Post, released under MIT License. See LICENSE file.
-
-// +build !amd64,!386 gccgo
-
-package cpuid
-
-func initCPU() {
- cpuid = func(op uint32) (eax, ebx, ecx, edx uint32) {
- return 0, 0, 0, 0
- }
-
- cpuidex = func(op, op2 uint32) (eax, ebx, ecx, edx uint32) {
- return 0, 0, 0, 0
- }
-
- xgetbv = func(index uint32) (eax, edx uint32) {
- return 0, 0
- }
-
- rdtscpAsm = func() (eax, ebx, ecx, edx uint32) {
- return 0, 0, 0, 0
- }
-}
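detect_intel.go and detect_ref.go were two halves of a build-tag dispatch: both define initCPU, exactly one is compiled for a given target, and it points the package-level function variables (cpuid, cpuidex, xgetbv, rdtscpAsm) at either assembly stubs or pure-Go fallbacks. A runnable single-file sketch of the fallback half, with illustrative names:

    package main

    import "fmt"

    // A package-level function variable stands in for the real backend;
    // initCPU assigns whichever implementation the build selected.
    var cpuidFn func(op uint32) (eax, ebx, ecx, edx uint32)

    func initCPU() {
        // The detect_ref.go case: no assembly backend, report nothing.
        cpuidFn = func(op uint32) (eax, ebx, ecx, edx uint32) { return 0, 0, 0, 0 }
    }

    func main() {
        initCPU()
        fmt.Println(cpuidFn(0))
    }
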
diff --git a/vendor/github.com/klauspost/cpuid/generate.go b/vendor/github.com/klauspost/cpuid/generate.go
deleted file mode 100644
index 90e7a98d2..000000000
--- a/vendor/github.com/klauspost/cpuid/generate.go
+++ /dev/null
@@ -1,4 +0,0 @@
-package cpuid
-
-//go:generate go run private-gen.go
-//go:generate gofmt -w ./private
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/apparmor/apparmor.go b/vendor/github.com/opencontainers/runc/libcontainer/apparmor/apparmor.go
index 7fff0627f..debfc1e48 100644
--- a/vendor/github.com/opencontainers/runc/libcontainer/apparmor/apparmor.go
+++ b/vendor/github.com/opencontainers/runc/libcontainer/apparmor/apparmor.go
@@ -6,6 +6,8 @@ import (
"fmt"
"io/ioutil"
"os"
+
+ "github.com/opencontainers/runc/libcontainer/utils"
)
// IsEnabled returns true if apparmor is enabled for the host.
@@ -19,7 +21,7 @@ func IsEnabled() bool {
return false
}
-func setprocattr(attr, value string) error {
+func setProcAttr(attr, value string) error {
// Under AppArmor you can only change your own attr, so use /proc/self/
// instead of /proc/<tid>/ like libapparmor does
path := fmt.Sprintf("/proc/self/attr/%s", attr)
@@ -30,6 +32,10 @@ func setprocattr(attr, value string) error {
}
defer f.Close()
+ if err := utils.EnsureProcHandle(f); err != nil {
+ return err
+ }
+
_, err = fmt.Fprintf(f, "%s", value)
return err
}
@@ -37,7 +43,7 @@ func setprocattr(attr, value string) error {
// changeOnExec reimplements aa_change_onexec from libapparmor in Go
func changeOnExec(name string) error {
value := "exec " + name
- if err := setprocattr("exec", value); err != nil {
+ if err := setProcAttr("exec", value); err != nil {
return fmt.Errorf("apparmor failed to apply profile: %s", err)
}
return nil
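The change above guards writes to /proc/self/attr/<attr> with utils.EnsureProcHandle, so a file that merely looks like the procfs attribute node (for example, something bind-mounted over it) is rejected before any data is written. A minimal sketch of the same fstatfs check, assuming only golang.org/x/sys/unix:

    package main

    import (
        "fmt"
        "os"

        "golang.org/x/sys/unix"
    )

    // isProcfs reports whether an already-open handle lives on procfs by
    // comparing the filesystem magic returned by fstatfs.
    func isProcfs(f *os.File) (bool, error) {
        var buf unix.Statfs_t
        if err := unix.Fstatfs(int(f.Fd()), &buf); err != nil {
            return false, err
        }
        return buf.Type == unix.PROC_SUPER_MAGIC, nil
    }

    func main() {
        f, err := os.Open("/proc/self/status")
        if err != nil {
            panic(err)
        }
        defer f.Close()
        fmt.Println(isProcfs(f))
    }
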
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/utils.go b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/utils.go
index ec79ae767..60790f83b 100644
--- a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/utils.go
+++ b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/utils.go
@@ -11,6 +11,8 @@ import (
"path/filepath"
"strconv"
"strings"
+ "sync"
+ "syscall"
"time"
units "github.com/docker/go-units"
@@ -22,6 +24,11 @@ const (
CgroupProcesses = "cgroup.procs"
)
+var (
+ isUnifiedOnce sync.Once
+ isUnified bool
+)
+
// HugePageSizeUnitList is a list of the units used by the linux kernel when
// naming the HugePage control files.
// https://www.kernel.org/doc/Documentation/cgroup-v1/hugetlb.txt
@@ -29,6 +36,18 @@ const (
// depends on https://github.com/docker/go-units/commit/a09cd47f892041a4fac473133d181f5aea6fa393
var HugePageSizeUnitList = []string{"B", "KB", "MB", "GB", "TB", "PB"}
+// IsCgroup2UnifiedMode returns whether we are running in cgroup v2 unified mode.
+func IsCgroup2UnifiedMode() bool {
+ isUnifiedOnce.Do(func() {
+ var st syscall.Statfs_t
+ if err := syscall.Statfs("/sys/fs/cgroup", &st); err != nil {
+ panic("cannot statfs cgroup root")
+ }
+ isUnified = st.Type == unix.CGROUP2_SUPER_MAGIC
+ })
+ return isUnified
+}
+
// https://www.kernel.org/doc/Documentation/cgroup-v1/cgroups.txt
func FindCgroupMountpoint(cgroupPath, subsystem string) (string, error) {
mnt, _, err := FindCgroupMountpointAndRoot(cgroupPath, subsystem)
@@ -49,6 +68,10 @@ func FindCgroupMountpointAndRoot(cgroupPath, subsystem string) (string, string,
}
defer f.Close()
+ if IsCgroup2UnifiedMode() {
+ subsystem = ""
+ }
+
return findCgroupMountpointAndRootFromReader(f, cgroupPath, subsystem)
}
@@ -57,12 +80,12 @@ func findCgroupMountpointAndRootFromReader(reader io.Reader, cgroupPath, subsyst
for scanner.Scan() {
txt := scanner.Text()
fields := strings.Fields(txt)
- if len(fields) < 5 {
+ if len(fields) < 10 {
continue
}
if strings.HasPrefix(fields[4], cgroupPath) {
for _, opt := range strings.Split(fields[len(fields)-1], ",") {
- if opt == subsystem {
+ if (subsystem == "" && fields[9] == "cgroup2") || opt == subsystem {
return fields[4], fields[3], nil
}
}
@@ -76,6 +99,19 @@ func findCgroupMountpointAndRootFromReader(reader io.Reader, cgroupPath, subsyst
}
func isSubsystemAvailable(subsystem string) bool {
+ if IsCgroup2UnifiedMode() {
+ controllers, err := GetAllSubsystems()
+ if err != nil {
+ return false
+ }
+ for _, c := range controllers {
+ if c == subsystem {
+ return true
+ }
+ }
+ return false
+ }
+
cgroups, err := ParseCgroupFile("/proc/self/cgroup")
if err != nil {
return false
@@ -120,7 +156,7 @@ func FindCgroupMountpointDir() (string, error) {
return "", fmt.Errorf("Found no fields post '-' in %q", text)
}
- if postSeparatorFields[0] == "cgroup" {
+ if postSeparatorFields[0] == "cgroup" || postSeparatorFields[0] == "cgroup2" {
// Check that the mount is properly formatted.
if numPostFields < 3 {
return "", fmt.Errorf("Error found less than 3 fields post '-' in %q", text)
@@ -193,6 +229,19 @@ func getCgroupMountsHelper(ss map[string]bool, mi io.Reader, all bool) ([]Mount,
// GetCgroupMounts returns the mounts for the cgroup subsystems.
// all indicates whether to return just the first instance or all the mounts.
func GetCgroupMounts(all bool) ([]Mount, error) {
+ if IsCgroup2UnifiedMode() {
+ availableControllers, err := GetAllSubsystems()
+ if err != nil {
+ return nil, err
+ }
+ m := Mount{
+ Mountpoint: "/sys/fs/cgroup",
+ Root: "/sys/fs/cgroup",
+ Subsystems: availableControllers,
+ }
+ return []Mount{m}, nil
+ }
+
f, err := os.Open("/proc/self/mountinfo")
if err != nil {
return nil, err
@@ -356,6 +405,9 @@ func parseCgroupFromReader(r io.Reader) (map[string]string, error) {
}
func getControllerPath(subsystem string, cgroups map[string]string) (string, error) {
+ if IsCgroup2UnifiedMode() {
+ return "/", nil
+ }
if p, ok := cgroups[subsystem]; ok {
return p, nil
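IsCgroup2UnifiedMode reduces cgroup v2 detection to one statfs call: if /sys/fs/cgroup itself carries the cgroup2 magic the host is in unified mode, while on hybrid hosts it is a tmpfs holding per-controller v1 mounts. A standalone sketch of that probe (unlike the vendored code, this version swallows the statfs error instead of panicking):

    package main

    import (
        "fmt"
        "sync"

        "golang.org/x/sys/unix"
    )

    var (
        once    sync.Once
        unified bool
    )

    // isCgroup2Unified caches the statfs result, mirroring the sync.Once
    // pattern the patch introduces.
    func isCgroup2Unified() bool {
        once.Do(func() {
            var st unix.Statfs_t
            if err := unix.Statfs("/sys/fs/cgroup", &st); err != nil {
                return // treated as "not unified" in this sketch
            }
            unified = st.Type == unix.CGROUP2_SUPER_MAGIC
        })
        return unified
    }

    func main() {
        fmt.Println("cgroup v2 unified:", isCgroup2Unified())
    }
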
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/configs/blkio_device.go b/vendor/github.com/opencontainers/runc/libcontainer/configs/blkio_device.go
index e0f3ca165..fa195bf90 100644
--- a/vendor/github.com/opencontainers/runc/libcontainer/configs/blkio_device.go
+++ b/vendor/github.com/opencontainers/runc/libcontainer/configs/blkio_device.go
@@ -59,3 +59,8 @@ func NewThrottleDevice(major, minor int64, rate uint64) *ThrottleDevice {
func (td *ThrottleDevice) String() string {
return fmt.Sprintf("%d:%d %d", td.Major, td.Minor, td.Rate)
}
+
+// StringName formats the struct to be writable to the cgroup specific file
+func (td *ThrottleDevice) StringName(name string) string {
+ return fmt.Sprintf("%d:%d %s=%d", td.Major, td.Minor, name, td.Rate)
+}
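StringName exists because cgroup v2 files such as io.max take several named settings per device on one line, rather than one bare value per file. A usage sketch with a local copy of the type (rbps, the read-bandwidth key, is one of the io.max names):

    package main

    import "fmt"

    // ThrottleDevice mirrors the fields of the configs type for illustration.
    type ThrottleDevice struct {
        Major, Minor int64
        Rate         uint64
    }

    func (td *ThrottleDevice) StringName(name string) string {
        return fmt.Sprintf("%d:%d %s=%d", td.Major, td.Minor, name, td.Rate)
    }

    func main() {
        td := &ThrottleDevice{Major: 8, Minor: 0, Rate: 1 << 20}
        fmt.Println(td.StringName("rbps")) // "8:0 rbps=1048576"
    }
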
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/configs/cgroup_linux.go b/vendor/github.com/opencontainers/runc/libcontainer/configs/cgroup_linux.go
index e15a662f5..58ed19c9e 100644
--- a/vendor/github.com/opencontainers/runc/libcontainer/configs/cgroup_linux.go
+++ b/vendor/github.com/opencontainers/runc/libcontainer/configs/cgroup_linux.go
@@ -119,4 +119,12 @@ type Resources struct {
// Set class identifier for container's network packets
NetClsClassid uint32 `json:"net_cls_classid_u"`
+
+ // Used on cgroups v2:
+
+ // CpuWeight sets a proportional share of CPU bandwidth.
+ CpuWeight uint64 `json:"cpu_weight"`
+
+ // CpuMax sets the maximum bandwidth limit (format: "max period").
+ CpuMax string `json:"cpu_max"`
}
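CpuWeight and CpuMax correspond to the cgroup v2 cpu.weight (range 1-10000) and cpu.max interfaces; CpuMax is written verbatim, e.g. "200000 1000000" for 200ms of CPU per 1s period. When translating from v1 CpuShares (range 2-262144), a linear range mapping is commonly used; the exact formula below is an assumption for illustration, not something this patch defines:

    package main

    import "fmt"

    // sharesToWeight linearly maps the v1 shares range [2, 262144] onto the
    // v2 weight range [1, 10000]; 0 stays 0, meaning "unset".
    func sharesToWeight(shares uint64) uint64 {
        if shares == 0 {
            return 0
        }
        return 1 + ((shares-2)*9999)/262142
    }

    func main() {
        fmt.Println(sharesToWeight(1024)) // the v1 default maps to 39
    }
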
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/configs/config.go b/vendor/github.com/opencontainers/runc/libcontainer/configs/config.go
index 7728522fe..24989e9f5 100644
--- a/vendor/github.com/opencontainers/runc/libcontainer/configs/config.go
+++ b/vendor/github.com/opencontainers/runc/libcontainer/configs/config.go
@@ -44,6 +44,7 @@ const (
Trap
Allow
Trace
+ Log
)
// Operator is a comparison operator to be used when matching syscall arguments in Seccomp
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/system/syscall_linux_64.go b/vendor/github.com/opencontainers/runc/libcontainer/system/syscall_linux_64.go
index 11c3faafb..e05e30adc 100644
--- a/vendor/github.com/opencontainers/runc/libcontainer/system/syscall_linux_64.go
+++ b/vendor/github.com/opencontainers/runc/libcontainer/system/syscall_linux_64.go
@@ -1,5 +1,5 @@
// +build linux
-// +build arm64 amd64 mips mipsle mips64 mips64le ppc ppc64 ppc64le s390x
+// +build arm64 amd64 mips mipsle mips64 mips64le ppc ppc64 ppc64le riscv64 s390x
package system
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/utils/cmsg.go b/vendor/github.com/opencontainers/runc/libcontainer/utils/cmsg.go
new file mode 100644
index 000000000..c8a9364d5
--- /dev/null
+++ b/vendor/github.com/opencontainers/runc/libcontainer/utils/cmsg.go
@@ -0,0 +1,93 @@
+// +build linux
+
+package utils
+
+/*
+ * Copyright 2016, 2017 SUSE LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import (
+ "fmt"
+ "os"
+
+ "golang.org/x/sys/unix"
+)
+
+// MaxNameLen is the maximum length of the name of a file descriptor being
+// sent using SendFd. The name of the file handle returned by RecvFd will never
+// be larger than this value.
+const MaxNameLen = 4096
+
+// oobSpace is the size of the oob slice required to store a single FD. Note
+// that unix.UnixRights appears to make the assumption that fd is always int32,
+// so sizeof(fd) = 4.
+var oobSpace = unix.CmsgSpace(4)
+
+// RecvFd waits for a file descriptor to be sent over the given AF_UNIX
+// socket. The file name of the remote file descriptor will be recreated
+// locally (it is sent as non-auxiliary data in the same payload).
+func RecvFd(socket *os.File) (*os.File, error) {
+ // For some reason, unix.Recvmsg uses the length rather than the capacity
+ // when passing the msg_controllen and other attributes to recvmsg. So we
+ // have to actually set the length.
+ name := make([]byte, MaxNameLen)
+ oob := make([]byte, oobSpace)
+
+ sockfd := socket.Fd()
+ n, oobn, _, _, err := unix.Recvmsg(int(sockfd), name, oob, 0)
+ if err != nil {
+ return nil, err
+ }
+
+ if n >= MaxNameLen || oobn != oobSpace {
+ return nil, fmt.Errorf("recvfd: incorrect number of bytes read (n=%d oobn=%d)", n, oobn)
+ }
+
+ // Truncate.
+ name = name[:n]
+ oob = oob[:oobn]
+
+ scms, err := unix.ParseSocketControlMessage(oob)
+ if err != nil {
+ return nil, err
+ }
+ if len(scms) != 1 {
+ return nil, fmt.Errorf("recvfd: number of SCMs is not 1: %d", len(scms))
+ }
+ scm := scms[0]
+
+ fds, err := unix.ParseUnixRights(&scm)
+ if err != nil {
+ return nil, err
+ }
+ if len(fds) != 1 {
+ return nil, fmt.Errorf("recvfd: number of fds is not 1: %d", len(fds))
+ }
+ fd := uintptr(fds[0])
+
+ return os.NewFile(fd, string(name)), nil
+}
+
+// SendFd sends a file descriptor over the given AF_UNIX socket. In
+// addition, the file.Name() of the given file will also be sent as
+// non-auxiliary data in the same payload (allowing contextual
+// information to be sent alongside a file descriptor).
+func SendFd(socket *os.File, name string, fd uintptr) error {
+ if len(name) >= MaxNameLen {
+ return fmt.Errorf("sendfd: filename too long: %s", name)
+ }
+ oob := unix.UnixRights(int(fd))
+ return unix.Sendmsg(int(socket.Fd()), []byte(name), oob, nil, 0)
+}
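SendFd and RecvFd pass the descriptor itself as SCM_RIGHTS ancillary data while carrying the file's name in the ordinary payload, which is why RecvFd can hand back a usable *os.File with a name attached. A round-trip sketch in a single process (runc itself does this across a process boundary):

    package main

    import (
        "fmt"
        "os"

        "github.com/opencontainers/runc/libcontainer/utils"
        "golang.org/x/sys/unix"
    )

    func main() {
        fds, err := unix.Socketpair(unix.AF_LOCAL, unix.SOCK_STREAM, 0)
        if err != nil {
            panic(err)
        }
        parent := os.NewFile(uintptr(fds[0]), "parent")
        child := os.NewFile(uintptr(fds[1]), "child")

        src, err := os.Open("/etc/hostname")
        if err != nil {
            panic(err)
        }
        if err := utils.SendFd(child, src.Name(), src.Fd()); err != nil {
            panic(err)
        }
        got, err := utils.RecvFd(parent)
        if err != nil {
            panic(err)
        }
        fmt.Println("received fd named:", got.Name())
    }
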
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/utils/utils.go b/vendor/github.com/opencontainers/runc/libcontainer/utils/utils.go
new file mode 100644
index 000000000..40ccfaa1a
--- /dev/null
+++ b/vendor/github.com/opencontainers/runc/libcontainer/utils/utils.go
@@ -0,0 +1,112 @@
+package utils
+
+import (
+ "encoding/json"
+ "io"
+ "os"
+ "path/filepath"
+ "strings"
+ "unsafe"
+
+ "golang.org/x/sys/unix"
+)
+
+const (
+ exitSignalOffset = 128
+)
+
+// ResolveRootfs ensures that the current working directory is
+// not a symlink and returns the absolute path to the rootfs
+func ResolveRootfs(uncleanRootfs string) (string, error) {
+ rootfs, err := filepath.Abs(uncleanRootfs)
+ if err != nil {
+ return "", err
+ }
+ return filepath.EvalSymlinks(rootfs)
+}
+
+// ExitStatus returns the correct exit status for a process based on if it
+// was signaled or exited cleanly
+func ExitStatus(status unix.WaitStatus) int {
+ if status.Signaled() {
+ return exitSignalOffset + int(status.Signal())
+ }
+ return status.ExitStatus()
+}
+
+// WriteJSON writes the provided struct v to w using standard json marshaling
+func WriteJSON(w io.Writer, v interface{}) error {
+ data, err := json.Marshal(v)
+ if err != nil {
+ return err
+ }
+ _, err = w.Write(data)
+ return err
+}
+
+// CleanPath makes a path safe for use with filepath.Join. This is done by not
+// only cleaning the path, but also (if the path is relative) adding a leading
+// '/' and cleaning it (then removing the leading '/'). This ensures that a
+// path resulting from prepending another path will always lexically resolve
+// to a subdirectory of the prefixed path. This is all done lexically, so paths
+// that include symlinks won't be safe as a result of using CleanPath.
+func CleanPath(path string) string {
+ // Deal with empty strings nicely.
+ if path == "" {
+ return ""
+ }
+
+ // Ensure that all paths are cleaned (especially problematic ones like
+ // "/../../../../../" which can cause lots of issues).
+ path = filepath.Clean(path)
+
+ // If the path isn't absolute, we need to do more processing to fix paths
+ // such as "../../../../<etc>/some/path". We also shouldn't convert absolute
+ // paths to relative ones.
+ if !filepath.IsAbs(path) {
+ path = filepath.Clean(string(os.PathSeparator) + path)
+ // This can't fail, as (by definition) all paths are relative to root.
+ path, _ = filepath.Rel(string(os.PathSeparator), path)
+ }
+
+ // Clean the path again for good measure.
+ return filepath.Clean(path)
+}
+
+// SearchLabels searches a list of key-value pairs for the provided key and
+// returns the corresponding value. The pairs must be separated with '='.
+func SearchLabels(labels []string, query string) string {
+ for _, l := range labels {
+ parts := strings.SplitN(l, "=", 2)
+ if len(parts) < 2 {
+ continue
+ }
+ if parts[0] == query {
+ return parts[1]
+ }
+ }
+ return ""
+}
+
+// Annotations returns the bundle path and user defined annotations from the
+// libcontainer state. We need to remove the bundle because that is a label
+// added by libcontainer.
+func Annotations(labels []string) (bundle string, userAnnotations map[string]string) {
+ userAnnotations = make(map[string]string)
+ for _, l := range labels {
+ parts := strings.SplitN(l, "=", 2)
+ if len(parts) < 2 {
+ continue
+ }
+ if parts[0] == "bundle" {
+ bundle = parts[1]
+ } else {
+ userAnnotations[parts[0]] = parts[1]
+ }
+ }
+ return
+}
+
+func GetIntSize() int {
+ return int(unsafe.Sizeof(1))
+}
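CleanPath is purely lexical: a relative path is cleaned as if it were rooted at '/', so no amount of '..' can climb above whatever prefix is later joined in front of it. A sketch with a local copy of the function and a few inputs:

    package main

    import (
        "fmt"
        "os"
        "path/filepath"
    )

    // cleanPath is a local copy of the vendored CleanPath logic.
    func cleanPath(path string) string {
        if path == "" {
            return ""
        }
        path = filepath.Clean(path)
        if !filepath.IsAbs(path) {
            path = filepath.Clean(string(os.PathSeparator) + path)
            path, _ = filepath.Rel(string(os.PathSeparator), path)
        }
        return filepath.Clean(path)
    }

    func main() {
        fmt.Println(cleanPath("../../etc/passwd"))     // "etc/passwd"
        fmt.Println(cleanPath("/../../../etc/passwd")) // "/etc/passwd"
        // Joining after cleaning stays inside the prefix:
        fmt.Println(filepath.Join("/rootfs", cleanPath("../../etc/passwd")))
    }
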
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/utils/utils_unix.go b/vendor/github.com/opencontainers/runc/libcontainer/utils/utils_unix.go
new file mode 100644
index 000000000..1576f2d4a
--- /dev/null
+++ b/vendor/github.com/opencontainers/runc/libcontainer/utils/utils_unix.go
@@ -0,0 +1,68 @@
+// +build !windows
+
+package utils
+
+import (
+ "fmt"
+ "os"
+ "strconv"
+
+ "golang.org/x/sys/unix"
+)
+
+// EnsureProcHandle ensures that the given file handle is on procfs, returning an error otherwise.
+func EnsureProcHandle(fh *os.File) error {
+ var buf unix.Statfs_t
+ if err := unix.Fstatfs(int(fh.Fd()), &buf); err != nil {
+ return fmt.Errorf("ensure %s is on procfs: %v", fh.Name(), err)
+ }
+ if buf.Type != unix.PROC_SUPER_MAGIC {
+ return fmt.Errorf("%s is not on procfs", fh.Name())
+ }
+ return nil
+}
+
+// CloseExecFrom applies O_CLOEXEC to all file descriptors currently open for
+// the process (except for those below the given fd value).
+func CloseExecFrom(minFd int) error {
+ fdDir, err := os.Open("/proc/self/fd")
+ if err != nil {
+ return err
+ }
+ defer fdDir.Close()
+
+ if err := EnsureProcHandle(fdDir); err != nil {
+ return err
+ }
+
+ fdList, err := fdDir.Readdirnames(-1)
+ if err != nil {
+ return err
+ }
+ for _, fdStr := range fdList {
+ fd, err := strconv.Atoi(fdStr)
+ // Ignore non-numeric file names.
+ if err != nil {
+ continue
+ }
+ // Ignore descriptors lower than our specified minimum.
+ if fd < minFd {
+ continue
+ }
+ // Intentionally ignore errors from unix.CloseOnExec -- the cases where
+ // this might fail are basically file descriptors that have already
+ // been closed (including and especially the one that was created when
+ // ioutil.ReadDir did the "opendir" syscall).
+ unix.CloseOnExec(fd)
+ }
+ return nil
+}
+
+// NewSockPair returns a new unix socket pair
+func NewSockPair(name string) (parent *os.File, child *os.File, err error) {
+ fds, err := unix.Socketpair(unix.AF_LOCAL, unix.SOCK_STREAM|unix.SOCK_CLOEXEC, 0)
+ if err != nil {
+ return nil, nil, err
+ }
+ return os.NewFile(uintptr(fds[1]), name+"-p"), os.NewFile(uintptr(fds[0]), name+"-c"), nil
+}
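A typical call site for CloseExecFrom, assumed here for illustration: mark everything above stdio close-on-exec immediately before exec'ing, so the child inherits a minimal descriptor table no matter what the parent had open:

    package main

    import (
        "github.com/opencontainers/runc/libcontainer/utils"
        "golang.org/x/sys/unix"
    )

    func main() {
        // Keep fds 0-2 (stdio); everything else becomes close-on-exec.
        if err := utils.CloseExecFrom(3); err != nil {
            panic(err)
        }
        if err := unix.Exec("/bin/true", []string{"true"}, nil); err != nil {
            panic(err)
        }
    }
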
diff --git a/vendor/github.com/pkg/profile/.travis.yml b/vendor/github.com/pkg/profile/.travis.yml
index 1c9e6bb6b..fd72871e0 100644
--- a/vendor/github.com/pkg/profile/.travis.yml
+++ b/vendor/github.com/pkg/profile/.travis.yml
@@ -1,8 +1,8 @@
language: go
go_import_path: github.com/pkg/profile
go:
- - 1.10.x
- 1.12.x
+ - 1.13.x
- tip
script:
diff --git a/vendor/github.com/pkg/profile/go.mod b/vendor/github.com/pkg/profile/go.mod
new file mode 100644
index 000000000..2d82f3d84
--- /dev/null
+++ b/vendor/github.com/pkg/profile/go.mod
@@ -0,0 +1,3 @@
+module github.com/pkg/profile
+
+go 1.12
diff --git a/vendor/github.com/pkg/profile/mutex.go b/vendor/github.com/pkg/profile/mutex.go
deleted file mode 100644
index e69c5b44d..000000000
--- a/vendor/github.com/pkg/profile/mutex.go
+++ /dev/null
@@ -1,13 +0,0 @@
-// +build go1.8
-
-package profile
-
-import "runtime"
-
-func enableMutexProfile() {
- runtime.SetMutexProfileFraction(1)
-}
-
-func disableMutexProfile() {
- runtime.SetMutexProfileFraction(0)
-}
diff --git a/vendor/github.com/pkg/profile/mutex17.go b/vendor/github.com/pkg/profile/mutex17.go
deleted file mode 100644
index b004c21d5..000000000
--- a/vendor/github.com/pkg/profile/mutex17.go
+++ /dev/null
@@ -1,9 +0,0 @@
-// +build !go1.8
-
-package profile
-
-// mock mutex support for Go 1.7 and earlier.
-
-func enableMutexProfile() {}
-
-func disableMutexProfile() {}
diff --git a/vendor/github.com/pkg/profile/profile.go b/vendor/github.com/pkg/profile/profile.go
index 20e285427..b9fdfcfd8 100644
--- a/vendor/github.com/pkg/profile/profile.go
+++ b/vendor/github.com/pkg/profile/profile.go
@@ -10,6 +10,7 @@ import (
"path/filepath"
"runtime"
"runtime/pprof"
+ "runtime/trace"
"sync/atomic"
)
@@ -20,6 +21,7 @@ const (
blockMode
traceMode
threadCreateMode
+ goroutineMode
)
// Profile represents an active profiling session.
@@ -98,6 +100,10 @@ func TraceProfile(p *Profile) { p.mode = traceMode }
// It disables any previous profiling settings.
func ThreadcreationProfile(p *Profile) { p.mode = threadCreateMode }
+// GoroutineProfile enables goroutine profiling.
+// It disables any previous profiling settings.
+func GoroutineProfile(p *Profile) { p.mode = goroutineMode }
+
// ProfilePath controls the base path where various profiling
// files are written. If blank, the base path will be generated
// by ioutil.TempDir.
@@ -189,14 +195,14 @@ func Start(options ...func(*Profile)) interface {
if err != nil {
log.Fatalf("profile: could not create mutex profile %q: %v", fn, err)
}
- enableMutexProfile()
+ runtime.SetMutexProfileFraction(1)
logf("profile: mutex profiling enabled, %s", fn)
prof.closer = func() {
if mp := pprof.Lookup("mutex"); mp != nil {
mp.WriteTo(f, 0)
}
f.Close()
- disableMutexProfile()
+ runtime.SetMutexProfileFraction(0)
logf("profile: mutex profiling disabled, %s", fn)
}
@@ -236,14 +242,29 @@ func Start(options ...func(*Profile)) interface {
if err != nil {
log.Fatalf("profile: could not create trace output file %q: %v", fn, err)
}
- if err := startTrace(f); err != nil {
+ if err := trace.Start(f); err != nil {
log.Fatalf("profile: could not start trace: %v", err)
}
logf("profile: trace enabled, %s", fn)
prof.closer = func() {
- stopTrace()
+ trace.Stop()
logf("profile: trace disabled, %s", fn)
}
+
+ case goroutineMode:
+ fn := filepath.Join(path, "goroutine.pprof")
+ f, err := os.Create(fn)
+ if err != nil {
+ log.Fatalf("profile: could not create goroutine profile %q: %v", fn, err)
+ }
+ logf("profile: goroutine profiling enabled, %s", fn)
+ prof.closer = func() {
+ if mp := pprof.Lookup("goroutine"); mp != nil {
+ mp.WriteTo(f, 0)
+ }
+ f.Close()
+ logf("profile: goroutine profiling disabled, %s", fn)
+ }
}
if !prof.noShutdownHook {
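The new GoroutineProfile mode plugs into the usual pkg/profile pattern: start it at the top of main, and Stop writes goroutine.pprof to the chosen path. A minimal usage sketch:

    package main

    import "github.com/pkg/profile"

    func main() {
        // Writes ./goroutine.pprof when Stop runs at program exit.
        defer profile.Start(profile.GoroutineProfile, profile.ProfilePath(".")).Stop()
        // ... program body ...
    }
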
diff --git a/vendor/github.com/pkg/profile/trace.go b/vendor/github.com/pkg/profile/trace.go
deleted file mode 100644
index b349ed8b2..000000000
--- a/vendor/github.com/pkg/profile/trace.go
+++ /dev/null
@@ -1,8 +0,0 @@
-// +build go1.7
-
-package profile
-
-import "runtime/trace"
-
-var startTrace = trace.Start
-var stopTrace = trace.Stop
diff --git a/vendor/github.com/pkg/profile/trace16.go b/vendor/github.com/pkg/profile/trace16.go
deleted file mode 100644
index 6aa6566ef..000000000
--- a/vendor/github.com/pkg/profile/trace16.go
+++ /dev/null
@@ -1,10 +0,0 @@
-// +build !go1.7
-
-package profile
-
-import "io"
-
-// mock trace support for Go 1.6 and earlier.
-
-func startTrace(w io.Writer) error { return nil }
-func stopTrace() {}
diff --git a/vendor/gopkg.in/yaml.v2/.travis.yml b/vendor/gopkg.in/yaml.v2/.travis.yml
index 9f556934d..055480b9e 100644
--- a/vendor/gopkg.in/yaml.v2/.travis.yml
+++ b/vendor/gopkg.in/yaml.v2/.travis.yml
@@ -1,12 +1,16 @@
language: go
go:
- - 1.4
- - 1.5
- - 1.6
- - 1.7
- - 1.8
- - 1.9
- - tip
+ - "1.4.x"
+ - "1.5.x"
+ - "1.6.x"
+ - "1.7.x"
+ - "1.8.x"
+ - "1.9.x"
+ - "1.10.x"
+ - "1.11.x"
+ - "1.12.x"
+ - "1.13.x"
+ - "tip"
go_import_path: gopkg.in/yaml.v2
diff --git a/vendor/gopkg.in/yaml.v2/scannerc.go b/vendor/gopkg.in/yaml.v2/scannerc.go
index 570b8ecd1..b33bdbaec 100644
--- a/vendor/gopkg.in/yaml.v2/scannerc.go
+++ b/vendor/gopkg.in/yaml.v2/scannerc.go
@@ -634,13 +634,14 @@ func yaml_parser_fetch_more_tokens(parser *yaml_parser_t) bool {
need_more_tokens = true
} else {
// Check if any potential simple key may occupy the head position.
- if !yaml_parser_stale_simple_keys(parser) {
- return false
- }
-
- for i := range parser.simple_keys {
+ for i := len(parser.simple_keys) - 1; i >= 0; i-- {
simple_key := &parser.simple_keys[i]
- if simple_key.possible && simple_key.token_number == parser.tokens_parsed {
+ if simple_key.token_number < parser.tokens_parsed {
+ break
+ }
+ if valid, ok := yaml_simple_key_is_valid(parser, simple_key); !ok {
+ return false
+ } else if valid && simple_key.token_number == parser.tokens_parsed {
need_more_tokens = true
break
}
@@ -678,11 +679,6 @@ func yaml_parser_fetch_next_token(parser *yaml_parser_t) bool {
return false
}
- // Remove obsolete potential simple keys.
- if !yaml_parser_stale_simple_keys(parser) {
- return false
- }
-
// Check the indentation level against the current column.
if !yaml_parser_unroll_indent(parser, parser.mark.column) {
return false
@@ -837,29 +833,30 @@ func yaml_parser_fetch_next_token(parser *yaml_parser_t) bool {
"found character that cannot start any token")
}
-// Check the list of potential simple keys and remove the positions that
-// cannot contain simple keys anymore.
-func yaml_parser_stale_simple_keys(parser *yaml_parser_t) bool {
- // Check for a potential simple key for each flow level.
- for i := range parser.simple_keys {
- simple_key := &parser.simple_keys[i]
-
- // The specification requires that a simple key
- //
- // - is limited to a single line,
- // - is shorter than 1024 characters.
- if simple_key.possible && (simple_key.mark.line < parser.mark.line || simple_key.mark.index+1024 < parser.mark.index) {
-
- // Check if the potential simple key to be removed is required.
- if simple_key.required {
- return yaml_parser_set_scanner_error(parser,
- "while scanning a simple key", simple_key.mark,
- "could not find expected ':'")
- }
- simple_key.possible = false
+func yaml_simple_key_is_valid(parser *yaml_parser_t, simple_key *yaml_simple_key_t) (valid, ok bool) {
+ if !simple_key.possible {
+ return false, true
+ }
+
+ // The 1.2 specification says:
+ //
+ // "If the ? indicator is omitted, parsing needs to see past the
+ // implicit key to recognize it as such. To limit the amount of
+ // lookahead required, the “:” indicator must appear at most 1024
+ // Unicode characters beyond the start of the key. In addition, the key
+ // is restricted to a single line."
+ //
+ if simple_key.mark.line < parser.mark.line || simple_key.mark.index+1024 < parser.mark.index {
+ // Check if the potential simple key to be removed is required.
+ if simple_key.required {
+ return false, yaml_parser_set_scanner_error(parser,
+ "while scanning a simple key", simple_key.mark,
+ "could not find expected ':'")
}
+ simple_key.possible = false
+ return false, true
}
- return true
+ return true, true
}
// Check if a simple key may start at the current position and add it if
@@ -879,8 +876,8 @@ func yaml_parser_save_simple_key(parser *yaml_parser_t) bool {
possible: true,
required: required,
token_number: parser.tokens_parsed + (len(parser.tokens) - parser.tokens_head),
+ mark: parser.mark,
}
- simple_key.mark = parser.mark
if !yaml_parser_remove_simple_key(parser) {
return false
@@ -912,7 +909,12 @@ const max_flow_level = 10000
// Increase the flow level and resize the simple key list if needed.
func yaml_parser_increase_flow_level(parser *yaml_parser_t) bool {
// Reset the simple key on the next level.
- parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{})
+ parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{
+ possible: false,
+ required: false,
+ token_number: parser.tokens_parsed + (len(parser.tokens) - parser.tokens_head),
+ mark: parser.mark,
+ })
// Increase the flow level.
parser.flow_level++
@@ -1286,7 +1288,11 @@ func yaml_parser_fetch_value(parser *yaml_parser_t) bool {
simple_key := &parser.simple_keys[len(parser.simple_keys)-1]
// Have we found a simple key?
- if simple_key.possible {
+ if valid, ok := yaml_simple_key_is_valid(parser, simple_key); !ok {
+ return false
+
+ } else if valid {
+
// Create the KEY token and insert it into the queue.
token := yaml_token_t{
typ: yaml_KEY_TOKEN,
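The refactor centralizes the simple-key rule in yaml_simple_key_is_valid: an implicit key must stay on one line and its ':' must appear within 1024 characters of the key's start. A small sketch of the observable behavior (the exact error text is not asserted here):

    package main

    import (
        "fmt"
        "strings"

        "gopkg.in/yaml.v2"
    )

    func main() {
        var m map[string]string
        fmt.Println(yaml.Unmarshal([]byte("short: value"), &m)) // <nil>

        // A key whose ':' sits more than 1024 characters from its start is
        // rejected by the scanner.
        long := strings.Repeat("k", 2000) + ": value"
        fmt.Println(yaml.Unmarshal([]byte(long), &m)) // non-nil error
    }
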
diff --git a/vendor/gopkg.in/yaml.v2/yaml.go b/vendor/gopkg.in/yaml.v2/yaml.go
index de85aa4cd..89650e293 100644
--- a/vendor/gopkg.in/yaml.v2/yaml.go
+++ b/vendor/gopkg.in/yaml.v2/yaml.go
@@ -89,7 +89,7 @@ func UnmarshalStrict(in []byte, out interface{}) (err error) {
return unmarshal(in, out, true)
}
-// A Decorder reads and decodes YAML values from an input stream.
+// A Decoder reads and decodes YAML values from an input stream.
type Decoder struct {
strict bool
parser *parser
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 6584cea1f..5a17ebd78 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -8,7 +8,7 @@ github.com/Microsoft/go-winio
github.com/Microsoft/go-winio/archive/tar
github.com/Microsoft/go-winio/backuptar
github.com/Microsoft/go-winio/pkg/guid
-# github.com/Microsoft/hcsshim v0.8.7-0.20191101173118-65519b62243c
+# github.com/Microsoft/hcsshim v0.8.7
github.com/Microsoft/hcsshim
github.com/Microsoft/hcsshim/internal/cow
github.com/Microsoft/hcsshim/internal/hcs
@@ -60,7 +60,7 @@ github.com/containernetworking/plugins/pkg/ns
github.com/containernetworking/plugins/pkg/utils/hwaddr
github.com/containernetworking/plugins/plugins/ipam/host-local/backend
github.com/containernetworking/plugins/plugins/ipam/host-local/backend/allocator
-# github.com/containers/buildah v1.11.5
+# github.com/containers/buildah v1.11.6
github.com/containers/buildah
github.com/containers/buildah/bind
github.com/containers/buildah/chroot
@@ -125,7 +125,7 @@ github.com/containers/psgo/internal/dev
github.com/containers/psgo/internal/host
github.com/containers/psgo/internal/proc
github.com/containers/psgo/internal/process
-# github.com/containers/storage v1.13.5
+# github.com/containers/storage v1.15.2
github.com/containers/storage
github.com/containers/storage/drivers
github.com/containers/storage/drivers/aufs
@@ -295,15 +295,13 @@ github.com/inconshreveable/mousetrap
github.com/ishidawataru/sctp
# github.com/json-iterator/go v1.1.8
github.com/json-iterator/go
-# github.com/klauspost/compress v1.8.1
+# github.com/klauspost/compress v1.9.4
github.com/klauspost/compress/flate
github.com/klauspost/compress/fse
github.com/klauspost/compress/huff0
github.com/klauspost/compress/snappy
github.com/klauspost/compress/zstd
github.com/klauspost/compress/zstd/internal/xxhash
-# github.com/klauspost/cpuid v1.2.1
-github.com/klauspost/cpuid
# github.com/klauspost/pgzip v1.2.1
github.com/klauspost/pgzip
# github.com/konsorten/go-windows-terminal-sequences v1.0.2
@@ -373,13 +371,14 @@ github.com/opencontainers/go-digest
# github.com/opencontainers/image-spec v1.0.2-0.20190823105129-775207bd45b6
github.com/opencontainers/image-spec/specs-go
github.com/opencontainers/image-spec/specs-go/v1
-# github.com/opencontainers/runc v1.0.0-rc8.0.20190827142921-dd075602f158
+# github.com/opencontainers/runc v1.0.0-rc9
github.com/opencontainers/runc/libcontainer/apparmor
github.com/opencontainers/runc/libcontainer/cgroups
github.com/opencontainers/runc/libcontainer/configs
github.com/opencontainers/runc/libcontainer/devices
github.com/opencontainers/runc/libcontainer/system
github.com/opencontainers/runc/libcontainer/user
+github.com/opencontainers/runc/libcontainer/utils
# github.com/opencontainers/runtime-spec v0.1.2-0.20190618234442-a950415649c7
github.com/opencontainers/runtime-spec/specs-go
# github.com/opencontainers/runtime-tools v0.9.0
@@ -409,7 +408,7 @@ github.com/ostreedev/ostree-go/pkg/glibobject
github.com/ostreedev/ostree-go/pkg/otbuiltin
# github.com/pkg/errors v0.8.1
github.com/pkg/errors
-# github.com/pkg/profile v1.3.0
+# github.com/pkg/profile v1.4.0
github.com/pkg/profile
# github.com/pmezard/go-difflib v1.0.0
github.com/pmezard/go-difflib/difflib
@@ -584,7 +583,7 @@ gopkg.in/fsnotify.v1
gopkg.in/inf.v0
# gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7
gopkg.in/tomb.v1
-# gopkg.in/yaml.v2 v2.2.5
+# gopkg.in/yaml.v2 v2.2.7
gopkg.in/yaml.v2
# k8s.io/api v0.0.0-20190813020757-36bff7324fb7
k8s.io/api/core/v1