-rw-r--r--  .cirrus.yml  240
-rw-r--r--  Dockerfile  21
-rw-r--r--  Dockerfile.ubuntu  28
-rw-r--r--  Makefile  5
-rw-r--r--  README.md  4
-rw-r--r--  cmd/podman/containers/ps.go  12
-rw-r--r--  cmd/podman/containers/runlabel.go  5
-rw-r--r--  cmd/podman/generate/systemd.go  71
-rw-r--r--  cmd/podman/images/build.go  28
-rw-r--r--  cmd/podman/images/save.go  14
-rw-r--r--  cmd/podman/manifest/add.go  49
-rw-r--r--  cmd/podman/networks/create.go  3
-rw-r--r--  cmd/podman/networks/inspect.go  3
-rw-r--r--  cmd/podman/networks/list.go  3
-rw-r--r--  cmd/podman/networks/rm.go  3
-rw-r--r--  cmd/podman/root.go  8
-rw-r--r--  cmd/podman/system/df.go  8
-rw-r--r--  cmd/podman/utils/alias.go  2
-rw-r--r--  completions/bash/podman  5
-rw-r--r--  contrib/cirrus/README.md  182
-rw-r--r--  contrib/cirrus/add_second_partition.sh  3
-rwxr-xr-x  contrib/cirrus/build_vm_images.sh  67
-rwxr-xr-x  contrib/cirrus/check_image.sh  85
-rw-r--r--  contrib/cirrus/git_authors_to_irc_nicks.csv  12
-rw-r--r--  contrib/cirrus/lib.sh  178
-rwxr-xr-x  contrib/cirrus/lib.sh.t  44
-rwxr-xr-x  contrib/cirrus/notice_branch_failure.sh  19
-rw-r--r--  contrib/cirrus/packer/.gitignore  7
-rw-r--r--  contrib/cirrus/packer/Makefile  94
-rw-r--r--  contrib/cirrus/packer/README.how-to-update-cirrus-vms  89
-rw-r--r--  contrib/cirrus/packer/README.md  3
-rw-r--r--  contrib/cirrus/packer/cloud-init/fedora/cloud-init.service  20
-rw-r--r--  contrib/cirrus/packer/cloud-init/fedora/cloud.cfg.d/40_enable_root.cfg  1
-rw-r--r--  contrib/cirrus/packer/cloud-init/fedora/cloud.cfg.d/50_custom_disk_setup.cfg  4
-rw-r--r--  contrib/cirrus/packer/cloud-init/ubuntu/cloud.cfg.d/40_enable_root.cfg  1
-rw-r--r--  contrib/cirrus/packer/fedora_base-setup.sh  44
-rw-r--r--  contrib/cirrus/packer/fedora_packaging.sh  194
-rw-r--r--  contrib/cirrus/packer/fedora_setup.sh  34
-rw-r--r--  contrib/cirrus/packer/image-builder-image_base-setup.sh  71
-rw-r--r--  contrib/cirrus/packer/libpod_base_images.yml  164
-rw-r--r--  contrib/cirrus/packer/libpod_images.yml  86
-rw-r--r--  contrib/cirrus/packer/make-user-data.sh  20
-rw-r--r--  contrib/cirrus/packer/prior-fedora_base-setup.sh  44
-rwxr-xr-x  contrib/cirrus/packer/systemd_banish.sh  28
-rw-r--r--  contrib/cirrus/packer/ubuntu_packaging.sh  175
-rw-r--r--  contrib/cirrus/packer/ubuntu_setup.sh  35
-rw-r--r--  contrib/cirrus/packer/xfedora_setup.sh  34
-rwxr-xr-x  contrib/cirrus/podbot.py  105
-rwxr-xr-x  contrib/cirrus/setup_environment.sh  21
-rwxr-xr-x  contrib/cirrus/success.sh  66
-rw-r--r--  contrib/podmanimage/README.md  5
-rw-r--r--  contrib/rootless-cni-infra/Containerfile  35
-rw-r--r--  contrib/rootless-cni-infra/README.md  22
-rwxr-xr-x  contrib/rootless-cni-infra/rootless-cni-infra  147
-rw-r--r--  docs/source/Tutorials.rst  2
-rw-r--r--  docs/source/markdown/podman-build.1.md  4
-rw-r--r--  docs/source/markdown/podman-generate-systemd.1.md  6
-rw-r--r--  docs/source/markdown/podman-login.1.md  12
-rw-r--r--  docs/source/markdown/podman-logout.1.md  4
-rw-r--r--  docs/source/markdown/podman-manifest-add.1.md  23
-rw-r--r--  docs/source/markdown/podman-ps.1.md  19
-rw-r--r--  docs/source/markdown/podman-rm.1.md  8
-rw-r--r--  docs/source/markdown/podman-save.1.md  4
-rw-r--r--  docs/tutorials/README.md  4
-rw-r--r--  docs/tutorials/mac_client.md  101
-rw-r--r--  docs/tutorials/mac_win_client.md  111
-rw-r--r--  docs/tutorials/podman_tutorial.md  2
-rw-r--r--  docs/tutorials/remote_client.md  136
-rw-r--r--  docs/tutorials/varlink_remote_client.md  89
-rw-r--r--  go.mod  16
-rw-r--r--  go.sum  67
-rw-r--r--  libpod/container_internal.go  6
-rw-r--r--  libpod/container_internal_linux.go  435
-rw-r--r--  libpod/container_internal_linux_test.go  34
-rw-r--r--  libpod/container_validate.go  11
-rw-r--r--  libpod/define/errors.go  4
-rw-r--r--  libpod/image/image.go  193
-rw-r--r--  libpod/image/layer_tree.go  4
-rw-r--r--  libpod/image/prune.go  2
-rw-r--r--  libpod/image/pull.go  113
-rw-r--r--  libpod/image/pull_test.go  38
-rw-r--r--  libpod/kube.go  10
-rw-r--r--  libpod/networking_linux.go  24
-rw-r--r--  libpod/networking_unsupported.go  4
-rw-r--r--  libpod/oci_conmon_linux.go  4
-rw-r--r--  libpod/options.go  12
-rw-r--r--  libpod/rootless_cni_linux.go  320
-rw-r--r--  libpod/runtime_ctr.go  37
-rw-r--r--  libpod/runtime_img.go  9
-rw-r--r--  pkg/api/handlers/compat/containers.go  13
-rw-r--r--  pkg/api/handlers/compat/images.go  1
-rw-r--r--  pkg/api/handlers/compat/networks.go  22
-rw-r--r--  pkg/api/handlers/libpod/generate.go  45
-rw-r--r--  pkg/api/handlers/libpod/images.go  70
-rw-r--r--  pkg/api/handlers/libpod/networks.go  16
-rw-r--r--  pkg/api/handlers/libpod/pods.go  2
-rw-r--r--  pkg/api/server/register_generate.go  62
-rw-r--r--  pkg/api/server/register_images.go  34
-rw-r--r--  pkg/api/server/register_networks.go  12
-rw-r--r--  pkg/bindings/generate/generate.go  27
-rw-r--r--  pkg/bindings/images/images.go  28
-rw-r--r--  pkg/bindings/network/network.go  8
-rw-r--r--  pkg/domain/entities/containers.go  1
-rw-r--r--  pkg/domain/entities/generate.go  7
-rw-r--r--  pkg/domain/entities/images.go  17
-rw-r--r--  pkg/domain/entities/manifest.go  21
-rw-r--r--  pkg/domain/entities/system.go  7
-rw-r--r--  pkg/domain/infra/abi/containers.go  9
-rw-r--r--  pkg/domain/infra/abi/containers_runlabel.go  37
-rw-r--r--  pkg/domain/infra/abi/generate.go  8
-rw-r--r--  pkg/domain/infra/abi/images.go  24
-rw-r--r--  pkg/domain/infra/abi/manifest.go  20
-rw-r--r--  pkg/domain/infra/abi/network.go  4
-rw-r--r--  pkg/domain/infra/abi/play.go  12
-rw-r--r--  pkg/domain/infra/abi/system.go  11
-rw-r--r--  pkg/domain/infra/tunnel/containers.go  93
-rw-r--r--  pkg/domain/infra/tunnel/generate.go  3
-rw-r--r--  pkg/domain/infra/tunnel/images.go  23
-rw-r--r--  pkg/domain/infra/tunnel/network.go  2
-rw-r--r--  pkg/network/files.go  13
-rw-r--r--  pkg/ps/ps.go  85
-rw-r--r--  pkg/specgen/generate/oci.go  3
-rw-r--r--  pkg/specgen/generate/security.go  2
-rw-r--r--  pkg/systemd/generate/containers.go  33
-rw-r--r--  pkg/systemd/generate/containers_test.go  27
-rw-r--r--  pkg/systemd/generate/pods.go  49
-rw-r--r--  pkg/systemd/generate/pods_test.go  9
-rw-r--r--  rootless.md  3
-rw-r--r--  test/apiv2/35-networks.at  21
-rw-r--r--  test/apiv2/40-pods.at  4
-rw-r--r--  test/e2e/build_test.go  23
-rw-r--r--  test/e2e/common_test.go  6
-rw-r--r--  test/e2e/generate_kube_test.go  29
-rw-r--r--  test/e2e/generate_systemd_test.go  40
-rw-r--r--  test/e2e/load_test.go  8
-rw-r--r--  test/e2e/network_create_test.go  2
-rw-r--r--  test/e2e/network_test.go  26
-rw-r--r--  test/e2e/play_kube_test.go  44
-rw-r--r--  test/e2e/ps_test.go  6
-rw-r--r--  test/e2e/pull_test.go  43
-rw-r--r--  test/e2e/run_apparmor_test.go  13
-rw-r--r--  test/e2e/run_networking_test.go  10
-rw-r--r--  test/e2e/run_passwd_test.go  54
-rw-r--r--  test/e2e/run_privileged_test.go  42
-rw-r--r--  test/e2e/run_test.go  12
-rw-r--r--  test/e2e/run_userns_test.go  7
-rw-r--r--  test/e2e/runlabel_test.go  18
-rw-r--r--  test/e2e/save_test.go  47
-rw-r--r--  test/e2e/systemd_test.go  8
l---------  test/e2e/testdata/image  1
-rw-r--r--  test/system/001-basic.bats  11
-rw-r--r--  test/system/010-images.bats  14
-rw-r--r--  test/system/030-run.bats  41
-rw-r--r--  test/system/055-rm.bats  2
-rw-r--r--  test/system/110-history.bats  3
-rw-r--r--  test/system/120-load.bats  34
-rw-r--r--  test/system/130-kill.bats  2
-rw-r--r--  test/system/150-login.bats  43
-rw-r--r--  test/system/260-sdnotify.bats  5
-rw-r--r--  test/system/500-networking.bats  13
-rwxr-xr-x  test/system/build-testimage  59
-rw-r--r--  test/system/helpers.bash  6
-rw-r--r--  troubleshooting.md  25
-rw-r--r--  vendor/github.com/containers/common/pkg/seccomp/conversion.go  204
-rw-r--r--  vendor/github.com/containers/common/pkg/seccomp/filter.go  237
-rw-r--r--  vendor/github.com/containers/common/pkg/seccomp/seccomp_linux.go  4
-rw-r--r--  vendor/github.com/containers/common/pkg/seccomp/validate.go  29
-rw-r--r--  vendor/github.com/containers/common/version/version.go  2
-rw-r--r--  vendor/github.com/containers/image/v5/copy/copy.go  4
-rw-r--r--  vendor/github.com/containers/image/v5/docker/archive/dest.go  50
-rw-r--r--  vendor/github.com/containers/image/v5/docker/archive/reader.go  120
-rw-r--r--  vendor/github.com/containers/image/v5/docker/archive/src.go  22
-rw-r--r--  vendor/github.com/containers/image/v5/docker/archive/transport.go  95
-rw-r--r--  vendor/github.com/containers/image/v5/docker/archive/writer.go  82
-rw-r--r--  vendor/github.com/containers/image/v5/docker/daemon/daemon_dest.go  9
-rw-r--r--  vendor/github.com/containers/image/v5/docker/daemon/daemon_src.go  5
-rw-r--r--  vendor/github.com/containers/image/v5/docker/docker_image_dest.go  15
-rw-r--r--  vendor/github.com/containers/image/v5/docker/docker_image_src.go  6
-rw-r--r--  vendor/github.com/containers/image/v5/docker/errors.go  14
-rw-r--r--  vendor/github.com/containers/image/v5/docker/internal/tarfile/dest.go  217
-rw-r--r--  vendor/github.com/containers/image/v5/docker/internal/tarfile/reader.go  269
-rw-r--r--  vendor/github.com/containers/image/v5/docker/internal/tarfile/src.go  331
-rw-r--r--  vendor/github.com/containers/image/v5/docker/internal/tarfile/types.go  28
-rw-r--r--  vendor/github.com/containers/image/v5/docker/internal/tarfile/writer.go  381
-rw-r--r--  vendor/github.com/containers/image/v5/docker/lookaside.go  21
-rw-r--r--  vendor/github.com/containers/image/v5/docker/tarfile/dest.go  343
-rw-r--r--  vendor/github.com/containers/image/v5/docker/tarfile/src.go  442
-rw-r--r--  vendor/github.com/containers/image/v5/docker/tarfile/types.go  24
-rw-r--r--  vendor/github.com/containers/image/v5/oci/layout/oci_src.go  4
-rw-r--r--  vendor/github.com/containers/image/v5/pkg/docker/config/config.go  4
-rw-r--r--  vendor/github.com/containers/image/v5/pkg/sysregistriesv2/system_registries_v2.go  167
-rw-r--r--  vendor/github.com/containers/image/v5/version/version.go  2
-rw-r--r--  vendor/github.com/containers/storage/VERSION  2
-rw-r--r--  vendor/github.com/containers/storage/go.mod  5
-rw-r--r--  vendor/github.com/containers/storage/go.sum  8
-rw-r--r--  vendor/github.com/containers/storage/layers.go  2
-rw-r--r--  vendor/github.com/containers/storage/pkg/mount/mount.go  29
-rw-r--r--  vendor/github.com/containers/storage/pkg/mount/mountinfo.go  49
-rw-r--r--  vendor/github.com/containers/storage/pkg/mount/mountinfo_linux.go  119
-rw-r--r--  vendor/github.com/containers/storage/pkg/mount/mountinfo_unsupported.go  12
-rw-r--r--  vendor/github.com/containers/storage/utils.go  16
-rw-r--r--  vendor/github.com/gorilla/mux/mux.go  3
-rw-r--r--  vendor/github.com/gorilla/mux/regexp.go  6
-rw-r--r--  vendor/github.com/imdario/mergo/README.md  69
-rw-r--r--  vendor/github.com/imdario/mergo/doc.go  141
-rw-r--r--  vendor/github.com/imdario/mergo/go.mod  5
-rw-r--r--  vendor/github.com/imdario/mergo/go.sum  4
-rw-r--r--  vendor/github.com/imdario/mergo/map.go  10
-rw-r--r--  vendor/github.com/imdario/mergo/merge.go  269
-rw-r--r--  vendor/github.com/imdario/mergo/mergo.go  21
-rw-r--r--  vendor/github.com/klauspost/compress/flate/fast_encoder.go  10
-rw-r--r--  vendor/github.com/klauspost/compress/flate/gen_inflate.go  32
-rw-r--r--  vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go  8
-rw-r--r--  vendor/github.com/klauspost/compress/flate/inflate.go  38
-rw-r--r--  vendor/github.com/klauspost/compress/flate/inflate_gen.go  128
-rw-r--r--  vendor/github.com/klauspost/compress/flate/regmask_amd64.go  37
-rw-r--r--  vendor/github.com/klauspost/compress/flate/regmask_other.go  39
-rw-r--r--  vendor/github.com/klauspost/compress/huff0/huff0.go  10
-rw-r--r--  vendor/github.com/klauspost/compress/zstd/README.md  51
-rw-r--r--  vendor/github.com/klauspost/compress/zstd/blockdec.go  2
-rw-r--r--  vendor/github.com/klauspost/compress/zstd/blockenc.go  67
-rw-r--r--  vendor/github.com/klauspost/compress/zstd/decoder.go  8
-rw-r--r--  vendor/github.com/klauspost/compress/zstd/dict.go  26
-rw-r--r--  vendor/github.com/klauspost/compress/zstd/enc_base.go  155
-rw-r--r--  vendor/github.com/klauspost/compress/zstd/enc_better.go  81
-rw-r--r--  vendor/github.com/klauspost/compress/zstd/enc_dfast.go  39
-rw-r--r--  vendor/github.com/klauspost/compress/zstd/enc_fast.go  170
-rw-r--r--  vendor/github.com/klauspost/compress/zstd/enc_params.go  157
-rw-r--r--  vendor/github.com/klauspost/compress/zstd/encoder.go  39
-rw-r--r--  vendor/github.com/klauspost/compress/zstd/encoder_options.go  14
-rw-r--r--  vendor/github.com/klauspost/compress/zstd/frameenc.go  26
-rw-r--r--  vendor/github.com/klauspost/compress/zstd/history.go  4
-rw-r--r--  vendor/github.com/klauspost/compress/zstd/seqdec.go  8
-rw-r--r--  vendor/github.com/klauspost/compress/zstd/snappy.go  8
-rw-r--r--  vendor/github.com/klauspost/pgzip/.travis.yml  17
-rw-r--r--  vendor/github.com/klauspost/pgzip/gunzip.go  11
-rw-r--r--  vendor/github.com/mattn/go-isatty/.travis.yml  14
-rw-r--r--  vendor/github.com/mattn/go-isatty/LICENSE  9
-rw-r--r--  vendor/github.com/mattn/go-isatty/README.md  50
-rw-r--r--  vendor/github.com/mattn/go-isatty/doc.go  2
-rw-r--r--  vendor/github.com/mattn/go-isatty/go.mod  5
-rw-r--r--  vendor/github.com/mattn/go-isatty/go.sum  2
-rw-r--r--  vendor/github.com/mattn/go-isatty/go.test.sh  12
-rw-r--r--  vendor/github.com/mattn/go-isatty/isatty_bsd.go  18
-rw-r--r--  vendor/github.com/mattn/go-isatty/isatty_others.go  15
-rw-r--r--  vendor/github.com/mattn/go-isatty/isatty_plan9.go  22
-rw-r--r--  vendor/github.com/mattn/go-isatty/isatty_solaris.go  22
-rw-r--r--  vendor/github.com/mattn/go-isatty/isatty_tcgets.go  18
-rw-r--r--  vendor/github.com/mattn/go-isatty/isatty_windows.go  125
-rw-r--r--  vendor/github.com/mattn/go-isatty/renovate.json  8
-rw-r--r--  vendor/github.com/moby/sys/mountinfo/LICENSE  202
-rw-r--r--  vendor/github.com/moby/sys/mountinfo/go.mod  3
-rw-r--r--  vendor/github.com/moby/sys/mountinfo/mountinfo.go  67
-rw-r--r--  vendor/github.com/moby/sys/mountinfo/mountinfo_filters.go  58
-rw-r--r--  vendor/github.com/moby/sys/mountinfo/mountinfo_freebsd.go (renamed from vendor/github.com/containers/storage/pkg/mount/mountinfo_freebsd.go)  22
-rw-r--r--  vendor/github.com/moby/sys/mountinfo/mountinfo_linux.go  152
-rw-r--r--  vendor/github.com/moby/sys/mountinfo/mountinfo_unsupported.go  17
-rw-r--r--  vendor/github.com/moby/sys/mountinfo/mountinfo_windows.go  12
-rw-r--r--  vendor/github.com/onsi/ginkgo/CHANGELOG.md  5
-rw-r--r--  vendor/github.com/onsi/ginkgo/README.md  5
-rw-r--r--  vendor/github.com/onsi/ginkgo/config/config.go  2
-rw-r--r--  vendor/github.com/onsi/ginkgo/ginkgo/nodot/nodot.go  4
-rw-r--r--  vendor/github.com/onsi/gomega/CHANGELOG.md  5
-rw-r--r--  vendor/github.com/onsi/gomega/gomega_dsl.go  35
-rw-r--r--  vendor/github.com/ulikunitz/xz/TODO.md  5
-rw-r--r--  vendor/github.com/ulikunitz/xz/bits.go  7
-rw-r--r--  vendor/github.com/vbauerster/mpb/v5/bar.go  28
-rw-r--r--  vendor/github.com/vbauerster/mpb/v5/cwriter/util_bsd.go  7
-rw-r--r--  vendor/github.com/vbauerster/mpb/v5/cwriter/util_linux.go  7
-rw-r--r--  vendor/github.com/vbauerster/mpb/v5/cwriter/util_solaris.go  7
-rw-r--r--  vendor/github.com/vbauerster/mpb/v5/cwriter/writer.go  28
-rw-r--r--  vendor/github.com/vbauerster/mpb/v5/cwriter/writer_posix.go  16
-rw-r--r--  vendor/github.com/vbauerster/mpb/v5/cwriter/writer_windows.go  86
-rw-r--r--  vendor/github.com/vbauerster/mpb/v5/decor/counters.go  206
-rw-r--r--  vendor/github.com/vbauerster/mpb/v5/go.mod  3
-rw-r--r--  vendor/github.com/vbauerster/mpb/v5/go.sum  7
-rw-r--r--  vendor/golang.org/x/sys/cpu/cpu.go  96
-rw-r--r--  vendor/golang.org/x/sys/cpu/cpu_aix.go  4
-rw-r--r--  vendor/golang.org/x/sys/cpu/cpu_arm.go  33
-rw-r--r--  vendor/golang.org/x/sys/cpu/cpu_arm64.go  31
-rw-r--r--  vendor/golang.org/x/sys/cpu/cpu_linux.go  2
-rw-r--r--  vendor/golang.org/x/sys/cpu/cpu_linux_ppc64x.go  2
-rw-r--r--  vendor/golang.org/x/sys/cpu/cpu_linux_s390x.go  2
-rw-r--r--  vendor/golang.org/x/sys/cpu/cpu_mips64x.go  6
-rw-r--r--  vendor/golang.org/x/sys/cpu/cpu_mipsx.go  2
-rw-r--r--  vendor/golang.org/x/sys/cpu/cpu_other_arm.go  9
-rw-r--r--  vendor/golang.org/x/sys/cpu/cpu_ppc64x.go  16
-rw-r--r--  vendor/golang.org/x/sys/cpu/cpu_riscv64.go  2
-rw-r--r--  vendor/golang.org/x/sys/cpu/cpu_s390x.go  30
-rw-r--r--  vendor/golang.org/x/sys/cpu/cpu_wasm.go  4
-rw-r--r--  vendor/golang.org/x/sys/cpu/cpu_x86.go  31
-rw-r--r--  vendor/golang.org/x/sys/unix/syscall_linux.go  13
-rw-r--r--  vendor/golang.org/x/sys/unix/zerrors_linux.go  20
-rw-r--r--  vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go  1
-rw-r--r--  vendor/golang.org/x/sys/unix/zsyscall_linux.go  15
-rw-r--r--  vendor/golang.org/x/sys/unix/zsysnum_linux_386.go  1
-rw-r--r--  vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go  1
-rw-r--r--  vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go  1
-rw-r--r--  vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go  1
-rw-r--r--  vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go  1
-rw-r--r--  vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go  1
-rw-r--r--  vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go  1
-rw-r--r--  vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go  1
-rw-r--r--  vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go  1
-rw-r--r--  vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go  1
-rw-r--r--  vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go  1
-rw-r--r--  vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go  1
-rw-r--r--  vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go  1
-rw-r--r--  vendor/golang.org/x/sys/unix/ztypes_linux.go  38
-rw-r--r--  vendor/modules.txt  31
310 files changed, 7738 insertions, 5171 deletions
diff --git a/.cirrus.yml b/.cirrus.yml
index 1bf35e142..9d220c69a 100644
--- a/.cirrus.yml
+++ b/.cirrus.yml
@@ -34,28 +34,18 @@ env:
####
#### Cache-image names to test with (double-quotes around names are critical)
###
- FEDORA_NAME: "fedora-32"
- PRIOR_FEDORA_NAME: "fedora-31"
- UBUNTU_NAME: "ubuntu-20"
- PRIOR_UBUNTU_NAME: "ubuntu-19"
+ FEDORA_NAME: "fedora"
+ PRIOR_FEDORA_NAME: "prior-fedora"
+ UBUNTU_NAME: "ubuntu"
+ PRIOR_UBUNTU_NAME: "prior-ubuntu"
- _BUILT_IMAGE_SUFFIX: "podman-6530021898584064"
+ _BUILT_IMAGE_SUFFIX: "c5809900649447424"
FEDORA_CACHE_IMAGE_NAME: "${FEDORA_NAME}-${_BUILT_IMAGE_SUFFIX}"
PRIOR_FEDORA_CACHE_IMAGE_NAME: "${PRIOR_FEDORA_NAME}-${_BUILT_IMAGE_SUFFIX}"
UBUNTU_CACHE_IMAGE_NAME: "${UBUNTU_NAME}-${_BUILT_IMAGE_SUFFIX}"
PRIOR_UBUNTU_CACHE_IMAGE_NAME: "${PRIOR_UBUNTU_NAME}-${_BUILT_IMAGE_SUFFIX}"
####
- #### Variables for composing new cache-images (used in PR testing) from
- #### base-images (pre-existing in GCE)
- ####
- BUILT_IMAGE_SUFFIX: "-${CIRRUS_REPO_NAME}-${CIRRUS_BUILD_ID}"
- # Special image w/ nested-libvirt + tools for creating new cache and base images
- IMAGE_BUILDER_CACHE_IMAGE_NAME: "image-builder-image-1541772081"
- # Name where this repository's VM images are stored
- GCP_PROJECT_ID: libpod-218412
-
- ####
#### Default to NOT operating in any special-case testing mode
####
SPECIALMODE: "none" # don't do anything special
@@ -66,8 +56,8 @@ env:
####
#### Credentials and other secret-sauces, decrypted at runtime when authorized.
####
- # Freenode IRC credentials for posting status messages
- IRCID: ENCRYPTED[0c4a3cc4ecda08bc47cd3d31592be8ae5c2bd0151bf3def00a9afd139ef1ab23a1bd0523319d076c027f9749ddb1f3c8]
+ # Name where this repository's VM images are stored
+ GCP_PROJECT_ID: libpod-218412
# Service-account client_email - needed to build images
SERVICE_ACCOUNT: ENCRYPTED[702a8e07e27a6faf7988fcddcc068c2ef2bb182a5aa671f5ccb7fbbfb891c823aa4a7856fb17240766845dbd68bd3f90]
# Service account username part of client_email - for ssh'ing into VMs
@@ -138,38 +128,24 @@ gating_task:
# Verify some aspects of ci/related scripts
ci_script:
- '${GOSRC}/${SCRIPT_BASE}/lib.sh.t |& ${TIMESTAMP}'
- - '/usr/local/bin/entrypoint.sh -C ${CIRRUS_WORKING_DIR}/${SCRIPT_BASE}/packer test'
- '${GOSRC}/${SCRIPT_BASE}/cirrus_yaml_test.py |& ${TIMESTAMP}'
# Verify expected bash environment (-o pipefail)
pipefail_enabledscript: 'if /bin/false | /bin/true; then echo "pipefail fault" && exit 72; fi'
- on_failure:
- failed_branch_script: '$CIRRUS_WORKING_DIR/$SCRIPT_BASE/notice_branch_failure.sh'
-
# Ensure these container images can build
container_image_build_task:
alias: 'container_image_build'
+ name: "build gate image $DEST_BRANCH branch"
depends_on:
- "gating"
- # Only run for PRs, quay.io will automatically build after bramch-push
+ # Only run for PRs, quay.io will automatically build after branch-push
only_if: $CIRRUS_BRANCH != $DEST_BRANCH
- matrix:
- - name: "build in_podman image ${FEDORA_NAME} "
- container:
- dockerfile: Dockerfile
- - name: "build in_podman image ${UBUNTU_NAME}"
- container:
- dockerfile: Dockerfile.ubuntu
- - name: "build gate image $DEST_BRANCH branch"
- container:
- dockerfile: contrib/gate/Dockerfile
-
container:
- dockerfile: Dockerfile
+ dockerfile: contrib/gate/Dockerfile
script: make install.remote
@@ -179,7 +155,6 @@ container_image_build_task:
rpmbuild_task:
only_if: >-
- $CIRRUS_CHANGE_TITLE !=~ '.*CI:IMG.*' &&
$CIRRUS_CHANGE_TITLE !=~ '.*CI:DOCS.*' &&
$CIRRUS_BRANCH != $DEST_BRANCH
@@ -197,18 +172,13 @@ rpmbuild_task:
- 'make -C ${CIRRUS_WORKING_DIR} -f ${CIRRUS_WORKING_DIR}/.copr/Makefile'
- 'rpmbuild --rebuild ${CIRRUS_WORKING_DIR}/podman-*.src.rpm'
- on_failure:
- failed_branch_script: '$CIRRUS_WORKING_DIR/$SCRIPT_BASE/notice_branch_failure.sh |& ${TIMESTAMP}'
-
# This task runs `make vendor` followed by ./hack/tree_status.sh to check
# whether the git tree is clean. The reasoning for that is to make sure
# that the vendor.conf, the code and the vendored packages in ./vendor are
# in sync at all times.
vendor_task:
- only_if: >-
- $CIRRUS_CHANGE_TITLE !=~ '.*CI:IMG.*' &&
- $CIRRUS_CHANGE_TITLE !=~ '.*CI:DOCS.*'
+ only_if: $CIRRUS_CHANGE_TITLE !=~ '.*CI:DOCS.*'
depends_on:
- "gating"
@@ -230,17 +200,12 @@ vendor_task:
- 'cd ${CIRRUS_WORKING_DIR} && make vendor'
- 'cd ${CIRRUS_WORKING_DIR} && ./hack/tree_status.sh'
- on_failure:
- failed_branch_script: '$CIRRUS_WORKING_DIR/$SCRIPT_BASE/notice_branch_failure.sh |& ${TIMESTAMP}'
-
# This task runs `make varlink_api_generate` followed by ./hack/tree_status.sh to check
# whether the git tree is clean.
varlink_api_task:
- only_if: >-
- $CIRRUS_CHANGE_TITLE !=~ '.*CI:IMG.*' &&
- $CIRRUS_CHANGE_TITLE !=~ '.*CI:DOCS.*'
+ only_if: $CIRRUS_CHANGE_TITLE !=~ '.*CI:DOCS.*'
depends_on:
- "gating"
@@ -265,9 +230,6 @@ varlink_api_task:
- '/usr/local/bin/entrypoint.sh BUILDTAGS="varlink" varlink_api_generate |& ${TIMESTAMP}'
- 'cd ${GOSRC} && ./hack/tree_status.sh |& ${TIMESTAMP}'
- on_failure:
- failed_branch_script: '$CIRRUS_WORKING_DIR/$SCRIPT_BASE/notice_branch_failure.sh'
-
build_each_commit_task:
@@ -278,7 +240,6 @@ build_each_commit_task:
only_if: >-
$CIRRUS_BRANCH != $DEST_BRANCH &&
- $CIRRUS_CHANGE_TITLE !=~ '.*CI:IMG.*' &&
$CIRRUS_CHANGE_TITLE !=~ '.*CI:DOCS.*'
gce_instance:
@@ -297,9 +258,6 @@ build_each_commit_task:
- 'git fetch --depth 50 origin $DEST_BRANCH |& ${TIMESTAMP}'
- 'make build-all-new-commits GIT_BASE_BRANCH=origin/$DEST_BRANCH |& ${TIMESTAMP}'
- on_failure:
- failed_branch_script: '$CIRRUS_WORKING_DIR/$SCRIPT_BASE/notice_branch_failure.sh'
-
build_without_cgo_task:
@@ -310,7 +268,6 @@ build_without_cgo_task:
only_if: >-
$CIRRUS_BRANCH != $DEST_BRANCH &&
- $CIRRUS_CHANGE_TITLE !=~ '.*CI:IMG.*' &&
$CIRRUS_CHANGE_TITLE !=~ '.*CI:DOCS.*'
gce_instance:
@@ -327,9 +284,6 @@ build_without_cgo_task:
- 'source $SCRIPT_BASE/lib.sh'
- 'make build-no-cgo'
- on_failure:
- failed_branch_script: '$CIRRUS_WORKING_DIR/$SCRIPT_BASE/notice_branch_failure.sh'
-
# Update metadata on VM images referenced by this repository state
meta_task:
@@ -360,32 +314,6 @@ meta_task:
script: '$CIRRUS_WORKING_DIR/$SCRIPT_BASE/update_meta.sh |& ${TIMESTAMP}'
-# Remove old and disused images based on labels set by meta_task
-image_prune_task:
-
- # This should ONLY ever run from the master branch, and never
- # anywhere else so it's behavior is always consistent, even
- # as new branches are created.
- only_if: $CIRRUS_BRANCH == "master"
-
- depends_on:
- - "meta"
-
- container:
- image: "quay.io/libpod/imgprune:master" # see contrib/imgprune
- cpu: 1
- memory: 1
-
- env:
- <<: *meta_env_vars
- GCPJSON: ENCRYPTED[766916fedf780cbc16ac3152f7f73c5d9dcf64768fc6e80b0858c5badd31e7b41f3c864405c814189fd340e5a056ba18]
- GCPNAME: ENCRYPTED[d6869741209b8cf380adb8a3858cbce4542c9cf115452fcd2024a176b08fce10112e8bf0fbcc2f0033e7b87ef4342b3a]
-
- timeout_in: 10m
-
- script: '/usr/local/bin/entrypoint.sh |& ${TIMESTAMP}'
-
-
# This task does the unit and integration testing for every platform
testing_task:
@@ -399,9 +327,7 @@ testing_task:
- "container_image_build"
# Only test build cache-images, if that's what's requested
- only_if: >-
- $CIRRUS_CHANGE_TITLE !=~ '.*CI:IMG.*' &&
- $CIRRUS_CHANGE_TITLE !=~ '.*CI:DOCS.*'
+ only_if: $CIRRUS_CHANGE_TITLE !=~ '.*CI:DOCS.*'
matrix:
- name: "test ${FEDORA_NAME}"
@@ -435,9 +361,6 @@ testing_task:
path: "*.tar.gz"
type: "application/x-tar"
- on_failure:
- failed_branch_script: '$CIRRUS_WORKING_DIR/$SCRIPT_BASE/notice_branch_failure.sh'
-
always: &standardlogs
package_versions_script: '$SCRIPT_BASE/logcollector.sh packages'
ginkgo_node_logs_script: '$SCRIPT_BASE/logcollector.sh ginkgo'
@@ -460,9 +383,7 @@ special_testing_rootless_task:
- "build_each_commit"
- "build_without_cgo"
- only_if: >-
- $CIRRUS_CHANGE_TITLE !=~ '.*CI:IMG.*' &&
- $CIRRUS_CHANGE_TITLE !=~ '.*CI:DOCS.*'
+ only_if: $CIRRUS_CHANGE_TITLE !=~ '.*CI:DOCS.*'
env:
ADD_SECOND_PARTITION: 'true'
@@ -477,9 +398,6 @@ special_testing_rootless_task:
system_test_script: '$SCRIPT_BASE/system_test.sh |& ${TIMESTAMP} | ${LOGFORMAT} system_test'
apiv2_test_script: '$SCRIPT_BASE/apiv2_test.sh |& ${TIMESTAMP} | ${LOGFORMAT} apiv2_test'
- on_failure:
- failed_branch_script: '$CIRRUS_WORKING_DIR/$SCRIPT_BASE/notice_branch_failure.sh'
-
always:
<<: *standardlogs
@@ -494,9 +412,7 @@ special_testing_in_podman_task:
- "build_each_commit"
- "build_without_cgo"
- only_if: >-
- $CIRRUS_CHANGE_TITLE !=~ '.*CI:IMG.*' &&
- $CIRRUS_CHANGE_TITLE !=~ '.*CI:DOCS.*'
+ only_if: $CIRRUS_CHANGE_TITLE !=~ '.*CI:DOCS.*'
matrix:
- name: "in-podman ${PRIOR_FEDORA_NAME}"
@@ -515,9 +431,6 @@ special_testing_in_podman_task:
setup_environment_script: '$SCRIPT_BASE/setup_environment.sh |& ${TIMESTAMP}'
integration_test_script: '$SCRIPT_BASE/integration_test.sh |& ${TIMESTAMP} | ${LOGFORMAT} integration_test'
- on_failure:
- failed_branch_script: '$CIRRUS_WORKING_DIR/$SCRIPT_BASE/notice_branch_failure.sh'
-
always:
<<: *standardlogs
@@ -530,9 +443,7 @@ special_testing_cross_task:
- "varlink_api"
- "vendor"
- only_if: >-
- $CIRRUS_CHANGE_TITLE !=~ '.*CI:IMG.*' &&
- $CIRRUS_CHANGE_TITLE !=~ '.*CI:DOCS.*'
+ only_if: $CIRRUS_CHANGE_TITLE !=~ '.*CI:DOCS.*'
matrix:
- name: 'cross-platform: windows'
@@ -548,9 +459,6 @@ special_testing_cross_task:
setup_environment_script: '$SCRIPT_BASE/setup_environment.sh |& ${TIMESTAMP}'
build_release_script: '$SCRIPT_BASE/build_release.sh |& ${TIMESTAMP}'
- on_failure:
- failed_branch_script: '$CIRRUS_WORKING_DIR/$SCRIPT_BASE/notice_branch_failure.sh'
-
# When examining a particular run, provide convenient access to release files.
zip_artifacts:
path: "*.zip"
@@ -568,9 +476,7 @@ special_testing_bindings_task:
- "varlink_api"
- "vendor"
- only_if: >-
- $CIRRUS_CHANGE_TITLE !=~ '.*CI:IMG.*' &&
- $CIRRUS_CHANGE_TITLE !=~ '.*CI:DOCS.*'
+ only_if: $CIRRUS_CHANGE_TITLE !=~ '.*CI:DOCS.*'
env:
SPECIALMODE: 'bindings' # See docs
@@ -581,9 +487,6 @@ special_testing_bindings_task:
setup_environment_script: '$SCRIPT_BASE/setup_environment.sh |& ${TIMESTAMP}'
integration_test_script: '$SCRIPT_BASE/integration_test.sh |& ${TIMESTAMP} | ${LOGFORMAT} integration_test'
- on_failure:
- failed_branch_script: '$CIRRUS_WORKING_DIR/$SCRIPT_BASE/notice_branch_failure.sh'
-
always:
<<: *standardlogs
@@ -595,9 +498,7 @@ special_testing_endpoint_task:
- "varlink_api"
- "vendor"
- only_if: >-
- $CIRRUS_CHANGE_TITLE !=~ '.*CI:IMG.*' &&
- $CIRRUS_CHANGE_TITLE !=~ '.*CI:DOCS.*'
+ only_if: $CIRRUS_CHANGE_TITLE !=~ '.*CI:DOCS.*'
env:
SPECIALMODE: 'endpoint' # See docs
@@ -606,96 +507,12 @@ special_testing_endpoint_task:
setup_environment_script: '$SCRIPT_BASE/setup_environment.sh |& ${TIMESTAMP}'
integration_test_script: '$SCRIPT_BASE/integration_test.sh |& ${TIMESTAMP} | ${LOGFORMAT} integration_test'
-
- on_failure:
- failed_branch_script: '$CIRRUS_WORKING_DIR/$SCRIPT_BASE/notice_branch_failure.sh'
-
- always:
- <<: *standardlogs
-
-
-# Test building of new cache-images for future PR testing, in this PR.
-test_build_cache_images_task:
-
- only_if: >-
- $CIRRUS_BRANCH != $DEST_BRANCH &&
- $CIRRUS_CHANGE_TITLE =~ '.*CI:IMG.*' &&
- $CIRRUS_CHANGE_TITLE !=~ '.*CI:DOCS.*'
-
- depends_on:
- - "gating"
- - 'container_image_build'
-
- # VMs created by packer are not cleaned up by cirrus, must allow task to complete
- auto_cancellation: $CI != "true"
-
- gce_instance:
- image_project: $GCP_PROJECT_ID
- zone: "us-central1-a"
- cpu: 4
- memory: "4Gb"
- disk: 200
- image_name: "${IMAGE_BUILDER_CACHE_IMAGE_NAME}"
- scopes: # required for image building
- - compute
- - devstorage.full_control
-
- networking_script: '${CIRRUS_WORKING_DIR}/${SCRIPT_BASE}/networking.sh'
- build_vm_images_script: '$SCRIPT_BASE/build_vm_images.sh |& ${TIMESTAMP}'
-
- on_failure:
- failed_df_script: '${DFCMD}'
- failed_journalctl_b_script: 'journalctl -b || echo "Uh oh, journalctl -b failed"'
-
-
-# Test building of new cache-images for future PR testing, in this PR.
-verify_test_built_images_task:
-
- only_if: >-
- $CIRRUS_BRANCH != $DEST_BRANCH &&
- $CIRRUS_CHANGE_TITLE =~ '.*CI:IMG.*' &&
- $CIRRUS_CHANGE_TITLE !=~ '.*CI:DOCS.*'
-
-
- depends_on:
- - "gating"
- - "test_build_cache_images"
-
- gce_instance:
- # Images generated by test_build_cache_images_task (above)
- image_name: "${PACKER_BUILDER_NAME}${BUILT_IMAGE_SUFFIX}"
-
- env:
- ADD_SECOND_PARTITION: 'true'
- matrix:
- - RCLI: 'true'
- - RCLI: 'false'
- matrix:
- PACKER_BUILDER_NAME: "${FEDORA_NAME}"
- PACKER_BUILDER_NAME: "${PRIOR_FEDORA_NAME}"
- PACKER_BUILDER_NAME: "${UBUNTU_NAME}"
- PACKER_BUILDER_NAME: "${PRIOR_UBUNTU_NAME}"
-
- networking_script: '${CIRRUS_WORKING_DIR}/${SCRIPT_BASE}/networking.sh'
- installed_packages_script: '$SCRIPT_BASE/logcollector.sh packages'
- environment_script: '$SCRIPT_BASE/setup_environment.sh |& ${TIMESTAMP}'
- # Verify expectations of built images
- check_image_script: '$SCRIPT_BASE/check_image.sh |& ${TIMESTAMP}'
- # Note: A truncated form of normal testing. It only needs to confirm new images
- # "probably" work. A full round of testing will happen again after $*_CACHE_IMAGE_NAME
- # are updated in this or another PR (w/o '***CIRRUS: TEST IMAGES***').
- integration_test_script: '$SCRIPT_BASE/integration_test.sh |& ${TIMESTAMP}'
- system_test_script: '$SCRIPT_BASE/system_test.sh |& ${TIMESTAMP}'
-
always:
<<: *standardlogs
docs_task:
- # Don't run this when building/testing new VM images
- only_if: $CIRRUS_CHANGE_TITLE !=~ '.*CI:IMG.*'
-
depends_on:
- "gating"
env:
@@ -724,7 +541,6 @@ success_task:
- "build_without_cgo"
- "container_image_build"
- "meta"
- - "image_prune"
- "testing"
- "rpmbuild"
- "special_testing_rootless"
@@ -732,10 +548,9 @@ success_task:
- "special_testing_cross"
- "special_testing_endpoint"
- "special_testing_bindings"
- - "test_build_cache_images"
- - "verify_test_built_images"
- "docs"
- "static_build"
+ - "darwin_build"
env:
CIRRUS_WORKING_DIR: "/usr/src/libpod"
@@ -748,7 +563,7 @@ success_task:
cpu: 1
memory: 1
- success_script: '/usr/local/bin/entrypoint.sh ./$SCRIPT_BASE/success.sh |& ${TIMESTAMP}'
+ success_script: /bin/true
# Build the static binary
static_build_task:
@@ -784,3 +599,18 @@ static_build_task:
binaries_artifacts:
path: "result/bin/podman"
+
+
+darwin_build_task:
+ depends_on:
+ - "gating"
+ osx_instance:
+ image: catalina-base
+ setup-script:
+ - brew install go
+ - brew install go-md2man
+ build-script:
+ - make podman-remote-darwin
+ - make install-podman-remote-darwin-docs
+ binaries_artifacts:
+ path: "bin/podman-remote-darwin"
diff --git a/Dockerfile b/Dockerfile
deleted file mode 100644
index c5120e440..000000000
--- a/Dockerfile
+++ /dev/null
@@ -1,21 +0,0 @@
-FROM registry.fedoraproject.org/fedora:latest
-
-# This container image is utilized by the containers CI automation system
-# for building and testing libpod inside a container environment.
-# It is assumed that the source to be tested will overwrite $GOSRC (below)
-# at runtime.
-ENV GOPATH=/var/tmp/go
-ENV GOSRC=$GOPATH/src/github.com/containers/podman
-ENV SCRIPT_BASE=./contrib/cirrus
-ENV PACKER_BASE=$SCRIPT_BASE/packer
-
-ADD / $GOSRC
-WORKDIR $GOSRC
-
-# Re-use repositories and package setup as in VMs under CI
-RUN bash $PACKER_BASE/fedora_packaging.sh && \
- dnf clean all && \
- rm -rf /var/cache/dnf
-
-# Mirror steps taken under CI
-RUN bash -c 'source $GOSRC/$SCRIPT_BASE/lib.sh && install_test_configs'
diff --git a/Dockerfile.ubuntu b/Dockerfile.ubuntu
deleted file mode 100644
index df4ebf3d4..000000000
--- a/Dockerfile.ubuntu
+++ /dev/null
@@ -1,28 +0,0 @@
-# Must resemble $UBUNTU_BASE_IMAGE in ./contrib/cirrus/lib.sh
-FROM ubuntu:20.04
-
-# This container image is intended for building and testing libpod
-# from inside a container environment. It is assumed that the source
-# to be tested will overwrite $GOSRC (below) at runtime.
-ENV GOPATH=/var/tmp/go
-ENV GOSRC=$GOPATH/src/github.com/containers/podman
-ENV SCRIPT_BASE=./contrib/cirrus
-ENV PACKER_BASE=$SCRIPT_BASE/packer
-
-RUN export DEBIAN_FRONTEND="noninteractive" && \
- apt-get -qq update --yes && \
- apt-get -qq upgrade --yes && \
- apt-get -qq install curl git && \
- apt-get -qq autoremove --yes && \
- rm -rf /var/cache/apt
-
-ADD / $GOSRC
-WORKDIR $GOSRC
-
-# Re-use repositories and package setup as in VMs under CI
-RUN bash $PACKER_BASE/ubuntu_packaging.sh && \
- apt-get -qq autoremove --yes && \
- rm -rf /var/cache/apt
-
-# Mirror steps taken under CI
-RUN bash -c 'source $GOSRC/$SCRIPT_BASE/lib.sh && install_test_configs'
diff --git a/Makefile b/Makefile
index 8ce1946ee..07ff21445 100644
--- a/Makefile
+++ b/Makefile
@@ -106,7 +106,10 @@ GOMD2MAN ?= $(shell command -v go-md2man || echo '$(GOBIN)/go-md2man')
CROSS_BUILD_TARGETS := \
bin/podman.cross.linux.amd64 \
bin/podman.cross.linux.ppc64le \
- bin/podman.cross.linux.arm
+ bin/podman.cross.linux.arm \
+ bin/podman.cross.linux.arm64 \
+ bin/podman.cross.linux.386 \
+ bin/podman.cross.linux.s390x
.PHONY: all
all: binaries docs
diff --git a/README.md b/README.md
index cf42edc2e..5a316f170 100644
--- a/README.md
+++ b/README.md
@@ -177,8 +177,8 @@ familiar container cli commands. For more details, see the
[Container Tools Guide](https://github.com/containers/buildah/tree/master/docs/containertools).
## Podman Legacy API (Varlink)
-Podman offers a Varlink-based API for remote management of containers.
-However, this API has been deprecated by the REST API.
+Podman offers a [Varlink-based API](https://github.com/containers/podman/blob/master/docs/tutorials/varlink_remote_client.md)
+for remote management of containers. However, this API has been deprecated by the REST API.
Varlink support is in maintenance mode, and will be removed in a future release.
For more details, you can see [this blog](https://podman.io/blogs/2020/01/17/podman-new-api.html).
diff --git a/cmd/podman/containers/ps.go b/cmd/podman/containers/ps.go
index ebb6ed98f..2aa3b3a9b 100644
--- a/cmd/podman/containers/ps.go
+++ b/cmd/podman/containers/ps.go
@@ -13,6 +13,7 @@ import (
tm "github.com/buger/goterm"
"github.com/containers/buildah/pkg/formats"
"github.com/containers/podman/v2/cmd/podman/registry"
+ "github.com/containers/podman/v2/cmd/podman/utils"
"github.com/containers/podman/v2/cmd/podman/validate"
"github.com/containers/podman/v2/pkg/domain/entities"
"github.com/cri-o/ocicni/pkg/ocicni"
@@ -56,9 +57,9 @@ func init() {
func listFlagSet(flags *pflag.FlagSet) {
flags.BoolVarP(&listOpts.All, "all", "a", false, "Show all the containers, default is only running containers")
flags.StringSliceVarP(&filters, "filter", "f", []string{}, "Filter output based on conditions given")
+ flags.BoolVar(&listOpts.Storage, "storage", false, "Show containers in storage not controlled by Podman")
flags.StringVar(&listOpts.Format, "format", "", "Pretty-print containers to JSON or using a Go template")
flags.IntVarP(&listOpts.Last, "last", "n", -1, "Print the n last created containers (all states)")
- flags.BoolVar(&listOpts.Namespace, "namespace", false, "Display namespace information")
flags.BoolVar(&listOpts.Namespace, "ns", false, "Display namespace information")
flags.BoolVar(&noTrunc, "no-trunc", false, "Display the extended information")
flags.BoolVarP(&listOpts.Pod, "pod", "p", false, "Print the ID and name of the pod the containers are associated with")
@@ -69,6 +70,7 @@ func listFlagSet(flags *pflag.FlagSet) {
sort := validate.Value(&listOpts.Sort, "command", "created", "id", "image", "names", "runningfor", "size", "status")
flags.Var(sort, "sort", "Sort output by: "+sort.Choices())
+ flags.SetNormalizeFunc(utils.AliasFlags)
}
func checkFlags(c *cobra.Command) error {
// latest, and last are mutually exclusive.
@@ -102,6 +104,14 @@ func checkFlags(c *cobra.Command) error {
if listOpts.Watch > 0 && listOpts.Latest {
return errors.New("the watch and latest flags cannot be used together")
}
+ cfg := registry.PodmanConfig()
+ if cfg.Engine.Namespace != "" {
+ if c.Flag("storage").Changed && listOpts.Storage {
+ return errors.New("--namespace and --storage flags can not both be set")
+ }
+ listOpts.Storage = false
+ }
+
return nil
}
diff --git a/cmd/podman/containers/runlabel.go b/cmd/podman/containers/runlabel.go
index 81eec2a42..5ee8c9d6c 100644
--- a/cmd/podman/containers/runlabel.go
+++ b/cmd/podman/containers/runlabel.go
@@ -30,7 +30,7 @@ var (
RunE: runlabel,
Args: cobra.MinimumNArgs(2),
Example: `podman container runlabel run imageID
- podman container runlabel --pull install imageID arg1 arg2
+ podman container runlabel install imageID arg1 arg2
podman container runlabel --display run myImage`,
}
)
@@ -51,7 +51,7 @@ func init() {
flags.StringVar(&runlabelOptions.Optional1, "opt1", "", "Optional parameter to pass for install")
flags.StringVar(&runlabelOptions.Optional2, "opt2", "", "Optional parameter to pass for install")
flags.StringVar(&runlabelOptions.Optional3, "opt3", "", "Optional parameter to pass for install")
- flags.BoolP("pull", "p", false, "Pull the image if it does not exist locally prior to executing the label contents")
+ flags.BoolVarP(&runlabelOptions.Pull, "pull", "p", true, "Pull the image if it does not exist locally prior to executing the label contents")
flags.BoolVarP(&runlabelOptions.Quiet, "quiet", "q", false, "Suppress output information when installing images")
flags.BoolVar(&runlabelOptions.Replace, "replace", false, "Replace existing container with a new one from the image")
flags.StringVar(&runlabelOptions.SignaturePolicy, "signature-policy", "", "`Pathname` of signature policy file (not usually used)")
@@ -61,6 +61,7 @@ func init() {
_ = flags.MarkHidden("opt1")
_ = flags.MarkHidden("opt2")
_ = flags.MarkHidden("opt3")
+ _ = flags.MarkHidden("pull")
_ = flags.MarkHidden("signature-policy")
if err := flags.MarkDeprecated("pull", "podman will pull if not found in local storage"); err != nil {
diff --git a/cmd/podman/generate/systemd.go b/cmd/podman/generate/systemd.go
index 851a104bc..f690836a4 100644
--- a/cmd/podman/generate/systemd.go
+++ b/cmd/podman/generate/systemd.go
@@ -1,15 +1,22 @@
package pods
import (
+ "encoding/json"
"fmt"
+ "os"
+ "path/filepath"
"github.com/containers/podman/v2/cmd/podman/registry"
"github.com/containers/podman/v2/cmd/podman/utils"
"github.com/containers/podman/v2/pkg/domain/entities"
+ "github.com/pkg/errors"
+ "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
)
var (
+ files bool
+ format string
systemdTimeout uint
systemdOptions = entities.GenerateSystemdOptions{}
systemdDescription = `Generate systemd units for a pod or container.
@@ -29,19 +36,20 @@ var (
func init() {
registry.Commands = append(registry.Commands, registry.CliCommand{
- Mode: []entities.EngineMode{entities.ABIMode},
+ Mode: []entities.EngineMode{entities.ABIMode, entities.TunnelMode},
Command: systemdCmd,
Parent: generateCmd,
})
flags := systemdCmd.Flags()
flags.BoolVarP(&systemdOptions.Name, "name", "n", false, "Use container/pod names instead of IDs")
- flags.BoolVarP(&systemdOptions.Files, "files", "f", false, "Generate .service files instead of printing to stdout")
+ flags.BoolVarP(&files, "files", "f", false, "Generate .service files instead of printing to stdout")
flags.UintVarP(&systemdTimeout, "time", "t", containerConfig.Engine.StopTimeout, "Stop timeout override")
flags.StringVar(&systemdOptions.RestartPolicy, "restart-policy", "on-failure", "Systemd restart-policy")
flags.BoolVarP(&systemdOptions.New, "new", "", false, "Create a new container instead of starting an existing one")
flags.StringVar(&systemdOptions.ContainerPrefix, "container-prefix", "container", "Systemd unit name prefix for containers")
flags.StringVar(&systemdOptions.PodPrefix, "pod-prefix", "pod", "Systemd unit name prefix for pods")
flags.StringVar(&systemdOptions.Separator, "separator", "-", "Systemd unit name separator between name/id and prefix")
+ flags.StringVar(&format, "format", "", "Print the created units in specified format (json)")
flags.SetNormalizeFunc(utils.AliasFlags)
}
@@ -50,11 +58,68 @@ func systemd(cmd *cobra.Command, args []string) error {
systemdOptions.StopTimeout = &systemdTimeout
}
+ if registry.IsRemote() {
+ logrus.Warnln("The generated units should be placed on your remote system")
+ }
+
report, err := registry.ContainerEngine().GenerateSystemd(registry.GetContext(), args[0], systemdOptions)
if err != nil {
return err
}
- fmt.Println(report.Output)
+ if files {
+ cwd, err := os.Getwd()
+ if err != nil {
+ return errors.Wrap(err, "error getting current working directory")
+ }
+ for name, content := range report.Units {
+ path := filepath.Join(cwd, fmt.Sprintf("%s.service", name))
+ f, err := os.Create(path)
+ if err != nil {
+ return err
+ }
+ _, err = f.WriteString(content)
+ if err != nil {
+ return err
+ }
+ err = f.Close()
+ if err != nil {
+ return err
+ }
+
+ // add newline if default format is given
+ if format == "" {
+ path += "\n"
+ }
+ // modify in place so we can print the
+ // paths when --files is set
+ report.Units[name] = path
+ }
+ }
+
+ switch format {
+ case "json":
+ return printJSON(report.Units)
+ case "":
+ return printDefault(report.Units)
+ default:
+ return errors.Errorf("unknown --format argument: %s", format)
+ }
+
+}
+
+func printDefault(units map[string]string) error {
+ for _, content := range units {
+ fmt.Print(content)
+ }
+ return nil
+}
+
+func printJSON(units map[string]string) error {
+ b, err := json.MarshalIndent(units, "", " ")
+ if err != nil {
+ return err
+ }
+ fmt.Println(string(b))
return nil
}
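
The `--format json` branch above simply marshals the unit-name-to-content map (or name-to-path, once `--files` has rewritten the values). A minimal sketch of the resulting output shape, using hypothetical unit names; note also that `printDefault` ranges over a Go map, so units come out in nondeterministic order:

```go
package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// Hypothetical report.Units after --files rewrote values to file paths.
	units := map[string]string{
		"container-foo": "/home/user/container-foo.service",
		"pod-bar":       "/home/user/pod-bar.service",
	}
	b, _ := json.MarshalIndent(units, "", " ")
	fmt.Println(string(b)) // two-key JSON object, one entry per unit
}
```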
diff --git a/cmd/podman/images/build.go b/cmd/podman/images/build.go
index 400f960cc..923109b15 100644
--- a/cmd/podman/images/build.go
+++ b/cmd/podman/images/build.go
@@ -211,7 +211,16 @@ func build(cmd *cobra.Command, args []string) error {
return err
}
- apiBuildOpts, err := buildFlagsWrapperToOptions(cmd, contextDir, &buildOpts)
+ var logfile *os.File
+ if cmd.Flag("logfile").Changed {
+ logfile, err = os.OpenFile(buildOpts.Logfile, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, 0600)
+ if err != nil {
+ return errors.Errorf("error opening logfile %q: %v", buildOpts.Logfile, err)
+ }
+ defer logfile.Close()
+ }
+
+ apiBuildOpts, err := buildFlagsWrapperToOptions(cmd, contextDir, &buildOpts, logfile)
if err != nil {
return err
}
@@ -225,7 +234,7 @@ func build(cmd *cobra.Command, args []string) error {
// conversion here prevents the API from doing that (redundantly).
//
// TODO: this code should really be in Buildah.
-func buildFlagsWrapperToOptions(c *cobra.Command, contextDir string, flags *buildFlagsWrapper) (*entities.BuildOptions, error) {
+func buildFlagsWrapperToOptions(c *cobra.Command, contextDir string, flags *buildFlagsWrapper, logfile *os.File) (*entities.BuildOptions, error) {
output := ""
tags := []string{}
if c.Flag("tag").Changed {
@@ -284,16 +293,11 @@ func buildFlagsWrapperToOptions(c *cobra.Command, contextDir string, flags *buil
stderr = os.Stderr
reporter = os.Stderr
- if c.Flag("logfile").Changed {
- f, err := os.OpenFile(flags.Logfile, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, 0600)
- if err != nil {
- return nil, errors.Errorf("error opening logfile %q: %v", flags.Logfile, err)
- }
- defer f.Close()
- logrus.SetOutput(f)
- stdout = f
- stderr = f
- reporter = f
+ if logfile != nil {
+ logrus.SetOutput(logfile)
+ stdout = logfile
+ stderr = logfile
+ reporter = logfile
}
var memoryLimit, memorySwap int64
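
The hunk above hoists the `os.OpenFile` call out of `buildFlagsWrapperToOptions` and into `build` itself. The reason is visible in the removed lines: a `defer f.Close()` inside the helper closed the logfile as soon as the options were assembled, before the build ever wrote to it. A self-contained illustration of that defer-scope pitfall (hypothetical helper name):

```go
package main

import (
	"fmt"
	"os"
)

// openLog reproduces the removed pattern: the deferred Close fires when this
// helper returns, so the caller receives a file that is already closed.
func openLog(path string) (*os.File, error) {
	f, err := os.Create(path)
	if err != nil {
		return nil, err
	}
	defer f.Close() // runs at openLog's return, not at the caller's
	return f, nil
}

func main() {
	f, _ := openLog("/tmp/build.log")
	if _, err := f.WriteString("step 1\n"); err != nil {
		fmt.Println("write fails:", err) // "file already closed"
	}
}
```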
diff --git a/cmd/podman/images/save.go b/cmd/podman/images/save.go
index 82a3513f5..c57f61221 100644
--- a/cmd/podman/images/save.go
+++ b/cmd/podman/images/save.go
@@ -16,7 +16,10 @@ import (
"golang.org/x/crypto/ssh/terminal"
)
-var validFormats = []string{define.OCIManifestDir, define.OCIArchive, define.V2s2ManifestDir, define.V2s2Archive}
+var (
+ validFormats = []string{define.OCIManifestDir, define.OCIArchive, define.V2s2ManifestDir, define.V2s2Archive}
+ containerConfig = registry.PodmanConfig()
+)
var (
saveDescription = `Save an image to docker-archive or oci-archive on the local machine. Default is docker-archive.`
@@ -79,7 +82,7 @@ func saveFlags(flags *pflag.FlagSet) {
flags.StringVar(&saveOpts.Format, "format", define.V2s2Archive, "Save image to oci-archive, oci-dir (directory with oci manifest type), docker-archive, docker-dir (directory with v2s2 manifest type)")
flags.StringVarP(&saveOpts.Output, "output", "o", "", "Write to a specified file (default: stdout, which must be redirected)")
flags.BoolVarP(&saveOpts.Quiet, "quiet", "q", false, "Suppress the output")
-
+ flags.BoolVarP(&saveOpts.MultiImageArchive, "multi-image-archive", "m", containerConfig.Engine.MultiImageArchive, "Interpret additional arguments as images not tags and create a multi-image-archive (only for docker-archive)")
}
func save(cmd *cobra.Command, args []string) (finalErr error) {
@@ -118,6 +121,13 @@ func save(cmd *cobra.Command, args []string) (finalErr error) {
if len(args) > 1 {
tags = args[1:]
}
+
+ // Decide whether c/image's progress bars should use stderr or stdout.
+ // If the output is set to stdout, any log message there would corrupt
+ // the tarfile.
+ if saveOpts.Output == os.Stdout.Name() {
+ saveOpts.Quiet = true
+ }
err := registry.ImageEngine().Save(context.Background(), args[0], tags, saveOpts)
if err == nil {
succeeded = true
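
One detail worth noting in the new guard: Go's os package constructs `os.Stdout` as `NewFile(1, "/dev/stdout")`, so `os.Stdout.Name()` is the fixed string `"/dev/stdout"` regardless of where fd 1 actually points. The comparison therefore appears to catch the explicit `-o /dev/stdout` spelling, forcing progress output quiet so it cannot interleave with the tar stream:

```go
package main

import (
	"fmt"
	"os"
)

func main() {
	// Name() returns the name passed to NewFile, not the redirect target.
	fmt.Println(os.Stdout.Name()) // /dev/stdout

	output := "/dev/stdout" // as if the user ran `podman save -o /dev/stdout ...`
	quiet := output == os.Stdout.Name()
	fmt.Println(quiet) // true: progress bars are suppressed
}
```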
diff --git a/cmd/podman/manifest/add.go b/cmd/podman/manifest/add.go
index ca633263d..128bf66a7 100644
--- a/cmd/podman/manifest/add.go
+++ b/cmd/podman/manifest/add.go
@@ -4,14 +4,26 @@ import (
"context"
"fmt"
+ "github.com/containers/common/pkg/auth"
+ "github.com/containers/image/v5/types"
"github.com/containers/podman/v2/cmd/podman/registry"
"github.com/containers/podman/v2/pkg/domain/entities"
+ "github.com/containers/podman/v2/pkg/util"
"github.com/pkg/errors"
"github.com/spf13/cobra"
)
+// manifestAddOptsWrapper wraps entities.ManifestAddOptions and prevents leaking
+// CLI-only fields into the API types.
+type manifestAddOptsWrapper struct {
+ entities.ManifestAddOptions
+
+ TLSVerifyCLI bool // CLI only
+ CredentialsCLI string
+}
+
var (
- manifestAddOpts = entities.ManifestAddOptions{}
+ manifestAddOpts = manifestAddOptsWrapper{}
addCmd = &cobra.Command{
Use: "add [flags] LIST LIST",
Short: "Add images to a manifest list or image index",
@@ -33,15 +45,48 @@ func init() {
flags.BoolVar(&manifestAddOpts.All, "all", false, "add all of the list's images if the image is a list")
flags.StringSliceVar(&manifestAddOpts.Annotation, "annotation", nil, "set an `annotation` for the specified image")
flags.StringVar(&manifestAddOpts.Arch, "arch", "", "override the `architecture` of the specified image")
+ flags.StringVar(&manifestAddOpts.Authfile, "authfile", auth.GetDefaultAuthFile(), "path of the authentication file. Use REGISTRY_AUTH_FILE environment variable to override")
+ flags.StringVar(&manifestAddOpts.CertDir, "cert-dir", "", "use certificates at the specified path to access the registry")
+ flags.StringVar(&manifestAddOpts.CredentialsCLI, "creds", "", "use `[username[:password]]` for accessing the registry")
+
flags.StringSliceVar(&manifestAddOpts.Features, "features", nil, "override the `features` of the specified image")
flags.StringVar(&manifestAddOpts.OS, "os", "", "override the `OS` of the specified image")
flags.StringVar(&manifestAddOpts.OSVersion, "os-version", "", "override the OS `version` of the specified image")
+ flags.BoolVar(&manifestAddOpts.TLSVerifyCLI, "tls-verify", true, "require HTTPS and verify certificates when accessing the registry")
flags.StringVar(&manifestAddOpts.Variant, "variant", "", "override the `Variant` of the specified image")
+
+ if registry.IsRemote() {
+ _ = flags.MarkHidden("authfile")
+ _ = flags.MarkHidden("cert-dir")
+ _ = flags.MarkHidden("tls-verify")
+ }
}
func add(cmd *cobra.Command, args []string) error {
+ if err := auth.CheckAuthFile(manifestAddOpts.Authfile); err != nil {
+ return err
+ }
+
manifestAddOpts.Images = []string{args[1], args[0]}
- listID, err := registry.ImageEngine().ManifestAdd(context.Background(), manifestAddOpts)
+
+ if manifestAddOpts.CredentialsCLI != "" {
+ creds, err := util.ParseRegistryCreds(manifestAddOpts.CredentialsCLI)
+ if err != nil {
+ return err
+ }
+ manifestAddOpts.Username = creds.Username
+ manifestAddOpts.Password = creds.Password
+ }
+
+ // TLS verification in c/image is controlled via a `types.OptionalBool`
+ // which allows for distinguishing among set-true, set-false, unspecified
+ // which is important to implement a sane way of dealing with defaults of
+ // boolean CLI flags.
+ if cmd.Flags().Changed("tls-verify") {
+ manifestAddOpts.SkipTLSVerify = types.NewOptionalBool(!manifestAddOpts.TLSVerifyCLI)
+ }
+
+ listID, err := registry.ImageEngine().ManifestAdd(context.Background(), manifestAddOpts.ManifestAddOptions)
if err != nil {
return errors.Wrapf(err, "error adding to manifest list %s", args[0])
}
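
The `types.OptionalBool` comment above is the crux of the change: a plain bool cannot distinguish "user passed --tls-verify=true" from "flag left at its default", so the wrapper only pins a value when cobra reports the flag as changed. A small sketch of the three-state behavior, assuming only the containers/image `types` package:

```go
package main

import (
	"fmt"

	"github.com/containers/image/v5/types"
)

func main() {
	// Zero value: undefined, letting lower layers fall back to
	// registries.conf / library defaults.
	var skip types.OptionalBool
	fmt.Println(skip == types.OptionalBoolUndefined) // true

	// Only when the user actually typed --tls-verify=... is a value pinned.
	flagChanged, tlsVerify := true, false // as if --tls-verify=false was given
	if flagChanged {
		skip = types.NewOptionalBool(!tlsVerify)
	}
	fmt.Println(skip == types.OptionalBoolTrue) // true: skip verification
}
```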
diff --git a/cmd/podman/networks/create.go b/cmd/podman/networks/create.go
index dabf6f0d2..68a577ae1 100644
--- a/cmd/podman/networks/create.go
+++ b/cmd/podman/networks/create.go
@@ -21,9 +21,6 @@ var (
RunE: networkCreate,
Args: cobra.MaximumNArgs(1),
Example: `podman network create podman1`,
- Annotations: map[string]string{
- registry.ParentNSRequired: "",
- },
}
)
diff --git a/cmd/podman/networks/inspect.go b/cmd/podman/networks/inspect.go
index f00d6b63c..c5872def7 100644
--- a/cmd/podman/networks/inspect.go
+++ b/cmd/podman/networks/inspect.go
@@ -22,9 +22,6 @@ var (
RunE: networkInspect,
Example: `podman network inspect podman`,
Args: cobra.MinimumNArgs(1),
- Annotations: map[string]string{
- registry.ParentNSRequired: "",
- },
}
)
diff --git a/cmd/podman/networks/list.go b/cmd/podman/networks/list.go
index 3a2651cbc..b6fb2bb80 100644
--- a/cmd/podman/networks/list.go
+++ b/cmd/podman/networks/list.go
@@ -25,9 +25,6 @@ var (
Long: networklistDescription,
RunE: networkList,
Example: `podman network list`,
- Annotations: map[string]string{
- registry.ParentNSRequired: "",
- },
}
)
diff --git a/cmd/podman/networks/rm.go b/cmd/podman/networks/rm.go
index dfbb5d081..ac49993b7 100644
--- a/cmd/podman/networks/rm.go
+++ b/cmd/podman/networks/rm.go
@@ -19,9 +19,6 @@ var (
RunE: networkRm,
Example: `podman network rm podman`,
Args: cobra.MinimumNArgs(1),
- Annotations: map[string]string{
- registry.ParentNSRequired: "",
- },
}
)
diff --git a/cmd/podman/root.go b/cmd/podman/root.go
index 749a5fbe7..6cf369f0a 100644
--- a/cmd/podman/root.go
+++ b/cmd/podman/root.go
@@ -104,8 +104,8 @@ func persistentPreRunE(cmd *cobra.Command, args []string) error {
// TODO: Remove trace statement in podman V2.1
logrus.Debugf("Called %s.PersistentPreRunE(%s)", cmd.Name(), strings.Join(os.Args, " "))
- // Help is a special case, no need for more setup
- if cmd.Name() == "help" {
+ // Help and commands with subcommands are special cases, no need for more setup
+ if cmd.Name() == "help" || cmd.HasSubCommands() {
return nil
}
@@ -204,8 +204,8 @@ func persistentPostRunE(cmd *cobra.Command, args []string) error {
// TODO: Remove trace statement in podman V2.1
logrus.Debugf("Called %s.PersistentPostRunE(%s)", cmd.Name(), strings.Join(os.Args, " "))
- // Help is a special case, no need for more cleanup
- if cmd.Name() == "help" {
+ // Help and commands with subcommands are special cases, no need for more cleanup
+ if cmd.Name() == "help" || cmd.HasSubCommands() {
return nil
}
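
The new `cmd.HasSubCommands()` guard relies on a cobra property: a parent command such as `podman network`, which has children but no `Run` of its own, only prints help, so there is nothing for the pre/post hooks to set up or tear down. A minimal sketch of the predicate, using a hypothetical command tree:

```go
package main

import (
	"fmt"

	"github.com/spf13/cobra"
)

func main() {
	root := &cobra.Command{Use: "podman"}
	network := &cobra.Command{Use: "network"} // parent only; bare invocation prints help
	network.AddCommand(&cobra.Command{Use: "ls", Run: func(*cobra.Command, []string) {}})
	root.AddCommand(network)

	fmt.Println(network.HasSubCommands())               // true  -> hooks return early
	fmt.Println(network.Commands()[0].HasSubCommands()) // false -> `ls` gets full setup
}
```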
diff --git a/cmd/podman/system/df.go b/cmd/podman/system/df.go
index 03991101e..b262c8478 100644
--- a/cmd/podman/system/df.go
+++ b/cmd/podman/system/df.go
@@ -134,7 +134,7 @@ func printSummary(reports *entities.SystemDfReport, userFormat string) error {
for _, v := range reports.Volumes {
activeVolumes += v.Links
volumesSize += v.Size
- volumesReclaimable += v.Size
+ volumesReclaimable += v.ReclaimableSize
}
volumeSummary := dfSummary{
Type: "Local Volumes",
@@ -182,7 +182,7 @@ func printVerbose(reports *entities.SystemDfReport) error {
dfContainers = append(dfContainers, &dfContainer{SystemDfContainerReport: d})
}
containerHeaders := "CONTAINER ID\tIMAGE\tCOMMAND\tLOCAL VOLUMES\tSIZE\tCREATED\tSTATUS\tNAMES\n"
- containerRow := "{{.ContainerID}}\t{{.Image}}\t{{.Command}}\t{{.LocalVolumes}}\t{{.Size}}\t{{.Created}}\t{{.Status}}\t{{.Names}}\n"
+ containerRow := "{{.ContainerID}}\t{{.Image}}\t{{.Command}}\t{{.LocalVolumes}}\t{{.RWSize}}\t{{.Created}}\t{{.Status}}\t{{.Names}}\n"
format = containerHeaders + "{{range . }}" + containerRow + "{{end}}"
if err := writeTemplate(w, format, dfContainers); err != nil {
return nil
@@ -257,8 +257,8 @@ func (d *dfContainer) Command() string {
return strings.Join(d.SystemDfContainerReport.Command, " ")
}
-func (d *dfContainer) Size() string {
- return units.HumanSize(float64(d.SystemDfContainerReport.Size))
+func (d *dfContainer) RWSize() string {
+ return units.HumanSize(float64(d.SystemDfContainerReport.RWSize))
}
func (d *dfContainer) Created() string {
diff --git a/cmd/podman/utils/alias.go b/cmd/podman/utils/alias.go
index e484461c5..ff31e82ea 100644
--- a/cmd/podman/utils/alias.go
+++ b/cmd/podman/utils/alias.go
@@ -19,6 +19,8 @@ func AliasFlags(f *pflag.FlagSet, name string) pflag.NormalizedName {
name = "network"
case "timeout":
name = "time"
+ case "namespace":
+ name = "ns"
}
return pflag.NormalizedName(name)
}
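
`AliasFlags` is installed via `pflag.FlagSet.SetNormalizeFunc`, which rewrites every flag name before lookup; that is how `ps` can drop its duplicate `--namespace` registration above and still accept both spellings. A standalone sketch of the same mechanism:

```go
package main

import (
	"fmt"

	"github.com/spf13/pflag"
)

func main() {
	fs := pflag.NewFlagSet("ps", pflag.ContinueOnError)
	ns := fs.Bool("ns", false, "Display namespace information")

	// Normalize --namespace to the registered --ns flag, as AliasFlags does.
	fs.SetNormalizeFunc(func(f *pflag.FlagSet, name string) pflag.NormalizedName {
		if name == "namespace" {
			name = "ns"
		}
		return pflag.NormalizedName(name)
	})

	_ = fs.Parse([]string{"--namespace"})
	fmt.Println(*ns) // true: the alias resolved to --ns
}
```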
diff --git a/completions/bash/podman b/completions/bash/podman
index 3b50af1a9..e8185235b 100644
--- a/completions/bash/podman
+++ b/completions/bash/podman
@@ -1846,6 +1846,9 @@ _podman_manifest() {
_podman_manifest_add() {
local options_with_args="
--annotation
+ --authfile
+ --cert-dir
+ --creds
--arch
--features
--os
@@ -1857,6 +1860,7 @@ _podman_manifest_add() {
--all
--help
-h
+ --tls-verify
"
_complete_ "$options_with_args" "$boolean_options"
@@ -2679,6 +2683,7 @@ _podman_ps() {
--pod -p
--quiet -q
--size -s
+ --storage
--namespace --ns
--sync
"
diff --git a/contrib/cirrus/README.md b/contrib/cirrus/README.md
index 977762293..f66560cc8 100644
--- a/contrib/cirrus/README.md
+++ b/contrib/cirrus/README.md
@@ -76,95 +76,6 @@ exercising cgroups v2 with Podman integration tests. Also depends on
having `SPECIALMODE` set to 'cgroupv2`
-### ``test_build_cache_images_task`` Task
-
-Modifying the contents of cache-images is tested by making changes to
-one or more of the ``./contrib/cirrus/packer/*_setup.sh`` files. Then
-in the PR description, add the magic string: ``[CI:IMG]``
-
-***N/B: Steps below are performed by automation***
-
-1. ``setup_environment.sh``: Same as for other tasks.
-
-2. ``build_vm_images.sh``: Utilize [the packer tool](http://packer.io/docs/)
- to produce new VM images. Create a new VM from each base-image, connect
- to them with ``ssh``, and perform the steps as defined by the
- ``$PACKER_BASE/libpod_images.yml`` file:
-
- 1. On a base-image VM, as root, copy the current state of the repository
- into ``/tmp/libpod``.
- 2. Execute distribution-specific scripts to prepare the image for
- use. For example, ``fedora_setup.sh``.
- 3. If successful, shut down each VM and record the names, and dates
- into a json manifest file.
- 4. Move the manifest file, into a google storage bucket object.
- This is a retained as a secondary method for tracking/auditing
- creation of VM images, should it ever be needed.
-
-### ``verify_test_built_images`` Task
-
-Only runs following successful ``test_build_cache_images_task`` task. Uses
-images following the standard naming format; ***however, only runs a limited
-sub-set of automated tests***. Validating newly built images fully, requires
-updating ``.cirrus.yml``.
-
-***N/B: Steps below are performed by automation***
-
-1. Using the just build VM images, launch VMs and wait for them to boot.
-
-2. Execute the `setup_environment.sh` as in the `testing` task.
-
-2. Execute the `integration_test.sh` as in the `testing` task.
-
-
-***Manual Steps:*** Assuming the automated steps pass, then
-you'll find the new image names displayed at the end of the
-`test_build_cache_images`. For example:
-
-
-```
-...cut...
-
-[+0747s] ==> Builds finished. The artifacts of successful builds are:
-[+0747s] --> ubuntu-18: A disk image was created: ubuntu-18-libpod-5664838702858240
-[+0747s] --> fedora-29: A disk image was created: fedora-29-libpod-5664838702858240
-[+0747s] --> fedora-30: A disk image was created: fedora-30-libpod-5664838702858240
-[+0747s] --> ubuntu-19: A disk image was created: ubuntu-19-libpod-5664838702858240
-```
-
-Notice the suffix on all the image names comes from the env. var. set in
-*.cirrus.yml*: `BUILT_IMAGE_SUFFIX: "-${CIRRUS_REPO_NAME}-${CIRRUS_BUILD_ID}"`.
-Edit `.cirrus.yml`, in the top-level `env` section, update the suffix variable
-used at runtime to launch VMs for testing:
-
-
-```yaml
-env:
- ...cut...
- ####
- #### Cache-image names to test with (double-quotes around names are critical)
- ###
- _BUILT_IMAGE_SUFFIX: "libpod-5664838702858240"
- FEDORA_CACHE_IMAGE_NAME: "fedora-30-${_BUILT_IMAGE_SUFFIX}"
- PRIOR_FEDORA_CACHE_IMAGE_NAME: "fedora-29-${_BUILT_IMAGE_SUFFIX}"
- ...cut...
-```
-
-***NOTES:***
-* If re-using the same PR with new images in `.cirrus.yml`,
- take care to also *update the PR description* to remove
- the magic ``[CI:IMG]`` string. Keeping it and
- `--force` pushing would needlessly cause Cirrus-CI to build
- and test images again.
-* In the future, if you need to review the log from the build that produced
- the referenced image:
-
- * Note the Build ID from the image name (for example `5664838702858240`).
- * Go to that build in the Cirrus-CI WebUI, using the build ID in the URL.
- (For example, `https://cirrus-ci.com/build/5664838702858240`.)
- * Choose the *test_build_cache_images* task.
- * Open the *build_vm_images* script section.
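
As an aside, the build ID can be recovered from any cache-image name with plain shell (an editor's sketch, not part of the automation):

```
IMG_NAME="fedora-30-libpod-5664838702858240"
BUILD_ID="${IMG_NAME##*-}"   # strip everything through the final '-'
echo "https://cirrus-ci.com/build/${BUILD_ID}"
```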
-
### `docs` Task
Builds swagger API documentation YAML and uploads to google storage (an online
@@ -226,99 +137,6 @@ gsutil cors set /path/to/file.json gs://libpod-master-releases
file. Therefore, if it is not functioning or misconfigured, a person must have altered it or
changes were made to the referring site (e.g. `docs.podman.io`).
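
For reference, a hypothetical shape of the JSON file handed to the `gsutil cors set` command above (field values are illustrative, not the project's actual policy):

```
cat > /tmp/cors.json <<'EOF'
[{"origin": ["https://docs.podman.io"], "method": ["GET"], "maxAgeSeconds": 3600}]
EOF
gsutil cors set /tmp/cors.json gs://libpod-master-releases
```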
-## Base-images
-
-Base-images are VM disk-images specially prepared for executing as GCE VMs.
-In particular, they run services on startup similar in purpose/function
-to the standard 'cloud-init' services.
-
-* The google services are required for full support of ssh-key management
- and GCE OAuth capabilities. Google provides native images in GCE
- with services pre-installed, for many platforms. For example,
- RHEL, CentOS, and Ubuntu.
-
-* Google does ***not*** provide any images for Fedora (as of 5/2019), nor do
- they provide a base-image prepared to run packer for creating other images
- in the ``test_build_vm_images`` Task (above).
-
-* Base images do not need to be produced often, but doing so completely
- manually would be time-consuming and error-prone. Therefore a special
- semi-automatic *Makefile* target is provided to assist with producing
- all the base-images: ``libpod_base_images``
-
-To produce new base-images, including an `image-builder-image` (used by
-the ``cache_images`` Task), some input parameters are required:
-
-* ``GCP_PROJECT_ID``: The complete GCP project ID string e.g. foobar-12345
- identifying where the images will be stored.
-
-* ``GOOGLE_APPLICATION_CREDENTIALS``: A *JSON* file containing
- credentials for a GCE service account. This can be [a service
- account](https://cloud.google.com/docs/authentication/production#obtaining_and_providing_service_account_credentials_manually)
- or [end-user
- credentials](https://cloud.google.com/docs/authentication/end-user#creating_your_client_credentials)
-
-* Optionally, a CSV list may be specified in ``PACKER_BUILDS``
- to limit the base-images produced. For example,
- ``PACKER_BUILDS=fedora,image-builder-image``.
-
-If there is no existing 'image-builder-image' within GCE, a new
-one may be bootstrapped by creating a CentOS 7 VM with support for
-nested-virtualization, and with elevated cloud privileges (to access
-GCE, from within the GCE VM). For example:
-
-```
-$ alias pgcloud='sudo podman run -it --rm -e AS_ID=$UID
- -e AS_USER=$USER -v $HOME:$HOME:z quay.io/cevich/gcloud_centos:latest'
-
-$ URL=https://www.googleapis.com/auth
-$ SCOPES=$URL/userinfo.email,$URL/compute,$URL/devstorage.full_control
-
-# The --min-cpu-platform is critical for nested-virt.
-$ pgcloud compute instances create $USER-image-builder \
- --image-family centos-7 \
- --boot-disk-size "200GB" \
- --min-cpu-platform "Intel Haswell" \
- --machine-type n1-standard-2 \
- --scopes $SCOPES
-```
-
-Then from that VM, execute the
-``contrib/cirrus/packer/image-builder-image_base-setup.sh`` script.
-Shut down the VM, and convert it into a new image-builder-image.
-
-Building new base images is done by first creating a VM from an
-image-builder-image and copying the credentials json file to it.
-
-```
-$ hack/get_ci_vm.sh image-builder-image-1541772081
-...in another terminal...
-$ pgcloud compute scp /path/to/gac.json $USER-image-builder-image-1541772081:.
-```
-
-Then, on the VM, change to the ``packer`` sub-directory, and build the images:
-
-```
-$ cd libpod/contrib/cirrus/packer
-$ make libpod_base_images GCP_PROJECT_ID=<VALUE> \
- GOOGLE_APPLICATION_CREDENTIALS=/path/to/gac.json \
- PACKER_BUILDS=<OPTIONAL>
-```
-
-Assuming this is successful (hence the semi-automatic part), packer will
-produce a ``packer-manifest.json`` output file. This contains the base-image
-names suitable for updating the ``*_BASE_IMAGE`` `env` keys in ``.cirrus.yml``.
-
-On failure, it should be possible to determine the problem from the packer
-output. Sometimes that means setting `PACKER_LOG=1` and troubleshooting
-the nested virt calls. It's also possible to observe the (nested) qemu-kvm
-console output. Simply set the ``TTYDEV`` parameter, for example:
-
-```
-$ make libpod_base_images ... TTYDEV=$(tty)
- ...
-```
-
## `$SPECIALMODE`
Some tasks alter their behavior based on this value. A summary of supported
diff --git a/contrib/cirrus/add_second_partition.sh b/contrib/cirrus/add_second_partition.sh
index 3c2f9f056..d0407be86 100644
--- a/contrib/cirrus/add_second_partition.sh
+++ b/contrib/cirrus/add_second_partition.sh
@@ -7,8 +7,7 @@
SLASH_DEVICE="/dev/sda" # Always the case on GCP
# The unallocated space results from the difference in disk-size between VM Image
-# and runtime request. The check_image.sh test includes a minimum-space check,
-# with the Image size set initially lower by contrib/cirrus/packer/libpod_images.yml
+# and runtime request.
NEW_PART_START="50%"
NEW_PART_END="100%"
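
For illustration only, the 50%-100% window above would typically reach `parted` as something like the following (the script's actual invocation falls outside this hunk):

```
parted --script "$SLASH_DEVICE" mkpart primary "$NEW_PART_START" "$NEW_PART_END"
```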
diff --git a/contrib/cirrus/build_vm_images.sh b/contrib/cirrus/build_vm_images.sh
deleted file mode 100755
index be1c82185..000000000
--- a/contrib/cirrus/build_vm_images.sh
+++ /dev/null
@@ -1,67 +0,0 @@
-#!/usr/bin/env bash
-
-set -e
-source $(dirname $0)/lib.sh
-
-BASE_IMAGE_VARS='FEDORA_BASE_IMAGE PRIOR_FEDORA_BASE_IMAGE UBUNTU_BASE_IMAGE PRIOR_UBUNTU_BASE_IMAGE'
-ENV_VARS="PACKER_BUILDS BUILT_IMAGE_SUFFIX $BASE_IMAGE_VARS SERVICE_ACCOUNT GCE_SSH_USERNAME GCP_PROJECT_ID PACKER_VER SCRIPT_BASE PACKER_BASE CIRRUS_BUILD_ID CIRRUS_CHANGE_IN_REPO"
-req_env_var $ENV_VARS
-# Must also be made available through make, into packer process
-export $ENV_VARS
-
-# Everything here is running on the 'image-builder-image' GCE image
-# Assume basic dependencies are all met, but there could be a newer version
-# of the packer binary
-PACKER_FILENAME="packer_${PACKER_VER}_linux_amd64.zip"
-if [[ -d "$HOME/packer" ]]
-then
- cd "$HOME/packer"
- # image_builder_image has packer pre-installed, check if same version requested
- if [[ -r "$PACKER_FILENAME" ]]
- then
- cp $PACKER_FILENAME "$GOSRC/$PACKER_BASE/"
- cp packer "$GOSRC/$PACKER_BASE/"
- fi
-fi
-
-cd "$GOSRC/$PACKER_BASE"
-# Add/update labels on base-images used in this build to prevent premature deletion
-ARGS="
-"
-for base_image_var in $BASE_IMAGE_VARS
-do
- # See entrypoint.sh in contrib/imgts and contrib/imgprune
- # These updates can take a while, run them in the background, check later
- gcloud compute images update \
- --update-labels=last-used=$(date +%s) \
- --update-labels=build-id=$CIRRUS_BUILD_ID \
- --update-labels=repo-ref=$CIRRUS_CHANGE_IN_REPO \
- --update-labels=project=$GCP_PROJECT_ID \
- ${!base_image_var} &
-done
-
-make libpod_images \
- PACKER_BUILDS=$PACKER_BUILDS \
- PACKER_VER=$PACKER_VER \
- GOSRC=$GOSRC \
- SCRIPT_BASE=$SCRIPT_BASE \
- PACKER_BASE=$PACKER_BASE \
- BUILT_IMAGE_SUFFIX=$BUILT_IMAGE_SUFFIX
-
-# Separate PR-produced images from those produced on master.
-if [[ "${CIRRUS_BRANCH:-}" == "master" ]]
-then
- POST_MERGE_BUCKET_SUFFIX="-master"
-else
- POST_MERGE_BUCKET_SUFFIX=""
-fi
-
-# When successful, upload manifest of produced images using a filename unique
-# to this build.
-URI="gs://packer-import${POST_MERGE_BUCKET_SUFFIX}/manifest${BUILT_IMAGE_SUFFIX}.json"
-gsutil cp packer-manifest.json "$URI"
-
-# Ensure any background 'gcloud compute images update' processes finish
-wait # No -n option in CentOS, this is the best that can be done :(
-
-echo "Finished. A JSON manifest of produced images is available at $URI"
diff --git a/contrib/cirrus/check_image.sh b/contrib/cirrus/check_image.sh
deleted file mode 100755
index 04867ca64..000000000
--- a/contrib/cirrus/check_image.sh
+++ /dev/null
@@ -1,85 +0,0 @@
-#!/usr/bin/env bash
-
-set -eo pipefail
-
-source $(dirname $0)/lib.sh
-
-EVIL_UNITS="$($CIRRUS_WORKING_DIR/$PACKER_BASE/systemd_banish.sh --list)"
-
-req_env_var PACKER_BUILDER_NAME RCLI EVIL_UNITS OS_RELEASE_ID CG_FS_TYPE
-
-NFAILS=0
-echo "Validating VM image"
-
-MIN_SLASH_GIGS=30
-read SLASH_DEVICE SLASH_FSTYPE SLASH_SIZE JUNK <<<$(findmnt --df --first-only --noheadings / | cut -d '.' -f 1)
-SLASH_SIZE_GIGS=$(echo "$SLASH_SIZE" | sed -r -e 's/G|g//')
-item_test "Minimum available disk space" $SLASH_SIZE_GIGS -gt $MIN_SLASH_GIGS || let "NFAILS+=1"
-
-MIN_MEM_MB=2000
-read JUNK TOTAL USED MEM_FREE JUNK <<<$(free -tm | tail -1)
-item_test 'Minimum available memory' $MEM_FREE -ge $MIN_MEM_MB || let "NFAILS+=1"
-
-# We're testing a custom-built podman; make sure there isn't a distro-provided
-# binary anywhere; that could potentially taint our results.
-remove_packaged_podman_files
-item_test "remove_packaged_podman_files() does its job" -z "$(type -P podman)" || let "NFAILS+=1"
-
-MIN_ZIP_VER='3.0'
-VER_RE='.+([[:digit:]]+\.[[:digit:]]+).+'
-ACTUAL_VER=$(zip --version 2>&1 | egrep -m 1 "Zip$VER_RE" | sed -r -e "s/$VER_RE/\\1/")
-item_test "minimum zip version" "$MIN_ZIP_VER" = $(echo -e "$MIN_ZIP_VER\n$ACTUAL_VER" | sort -V | head -1) || let "NFAILS+=1"
-
-for REQ_UNIT in google-accounts-daemon.service \
- google-clock-skew-daemon.service \
- google-instance-setup.service \
- google-network-daemon.service \
- google-shutdown-scripts.service \
- google-startup-scripts.service
-do
- # enabled/disabled appears at the end of the line; on some Ubuntu versions it appears twice
- service_status=$(systemctl list-unit-files --no-legend $REQ_UNIT | tac -s ' ' | head -1)
- item_test "required $REQ_UNIT status is enabled" \
- "$service_status" = "enabled" || let "NFAILS+=1"
-done
-
-for evil_unit in $EVIL_UNITS
-do
- # Exits zero if any unit matching pattern is running
- unit_status=$(systemctl is-active $evil_unit &> /dev/null; echo $?)
- item_test "No $evil_unit unit is present or active:" "$unit_status" -ne "0" || let "NFAILS+=1"
-done
-
-echo "Checking items specific to ${PACKER_BUILDER_NAME}${BUILT_IMAGE_SUFFIX}"
-case "$PACKER_BUILDER_NAME" in
- ubuntu*)
- item_test "On ubuntu, no periodic apt crap is enabled" -z "$(egrep $PERIODIC_APT_RE /etc/apt/apt.conf.d/*)"
- ;;
- fedora*)
- # Only runc -OR- crun should be installed, never both
- case "$CG_FS_TYPE" in
- tmpfs)
- HAS=runc
- HAS_NOT=crun
- ;;
- cgroup2fs)
- HAS=crun
- HAS_NOT=runc
- ;;
- esac
- HAS_RC=$(rpm -qV $HAS &> /dev/null; echo $?)
- HAS_NOT_RC=$(rpm -qV $HAS_NOT &> /dev/null; echo $?)
- item_test "With a cgroups-fs type $CG_FS_TYPE, the $HAS package is installed" $HAS_RC -eq 0
- item_test "With a cgroups-fs type $CG_FS_TYPE, the $HAS_NOT package is not installed" $HAS_NOT_RC -ne 0
- ;;
- xfedora*)
- echo "Kernel Command-line: $(cat /proc/cmdline)"
- item_test \
- "On ${PACKER_BUILDER_NAME} images, the /sys/fs/cgroup/unified directory does NOT exist" \
- "!" "-d" "/sys/fs/cgroup/unified" || let "NFAILS+=1"
- ;;
- *) echo "No vm-image specific items to check"
-esac
-
-echo "Total failed tests: $NFAILS"
-exit $NFAILS
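
The runc-vs-crun branch above keys off the cgroup filesystem type; on a live system the same probe looks like this (tmpfs indicates cgroups v1, cgroup2fs indicates v2):

```
CG_FS_TYPE="$(stat -f -c %T /sys/fs/cgroup)"
case "$CG_FS_TYPE" in
    tmpfs)     echo "cgroups v1 => expect runc" ;;
    cgroup2fs) echo "cgroups v2 => expect crun" ;;
esac
```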
diff --git a/contrib/cirrus/git_authors_to_irc_nicks.csv b/contrib/cirrus/git_authors_to_irc_nicks.csv
deleted file mode 100644
index a584cc76a..000000000
--- a/contrib/cirrus/git_authors_to_irc_nicks.csv
+++ /dev/null
@@ -1,12 +0,0 @@
-# Comma-separated mapping of author e-mail to Freenode IRC nick.
-# When no match is found here, the username portion of the e-mail is used.
-# Sorting is done at runtime - first-found e-mail match wins.
-# Comments (like this) and blank lines are ignored.
-
-bbaude@redhat.com,baude
-matthew.heon@pm.me,mheon
-matthew.heon@gmail.com,mheon
-emilien@redhat.com,EmilienM
-rothberg@redhat.com,vrothberg
-santiago@redhat.com,edsantiago
-gscrivan@redhat.com,giuseppe
diff --git a/contrib/cirrus/lib.sh b/contrib/cirrus/lib.sh
index 3292e9d14..f125dd76d 100644
--- a/contrib/cirrus/lib.sh
+++ b/contrib/cirrus/lib.sh
@@ -35,10 +35,8 @@ export PATH="$HOME/bin:$GOPATH/bin:/usr/local/bin:$PATH"
export LD_LIBRARY_PATH="/usr/local/lib${LD_LIBRARY_PATH:+:${LD_LIBRARY_PATH}}"
# Saves typing / in case location ever moves
SCRIPT_BASE=${SCRIPT_BASE:-./contrib/cirrus}
-PACKER_BASE=${PACKER_BASE:-./contrib/cirrus/packer}
# Important filepaths
SETUP_MARKER_FILEPATH="${SETUP_MARKER_FILEPATH:-/var/tmp/.setup_environment_sh_complete}"
-AUTHOR_NICKS_FILEPATH="${CIRRUS_WORKING_DIR}/${SCRIPT_BASE}/git_authors_to_irc_nicks.csv"
# Downloaded, but not installed packages.
PACKAGE_DOWNLOAD_DIR=/var/cache/download
@@ -61,22 +59,15 @@ CONTINUOUS_INTEGRATION="${CONTINUOUS_INTEGRATION:-false}"
CIRRUS_REPO_NAME=${CIRRUS_REPO_NAME:-libpod}
CIRRUS_BASE_SHA=${CIRRUS_BASE_SHA:-unknown$(date +%s)} # difficult to reliably discover
CIRRUS_BUILD_ID=${CIRRUS_BUILD_ID:-$RANDOM$(date +%s)} # must be short and unique
-# Vars. for image-building
-PACKER_VER="1.4.2"
-# CSV of cache-image names to build (see $PACKER_BASE/libpod_images.json)
-
-# List of cache images to build for 'CI:IMG' mode via build_vm_images.sh
-# Exists to support manual single-image building in case of emergency
-export PACKER_BUILDS="${PACKER_BUILDS:-ubuntu-20,ubuntu-19,fedora-32,fedora-31}"
-# Google cloud provides these, we just make copies (see $SCRIPT_BASE/README.md) for use
-export UBUNTU_BASE_IMAGE="ubuntu-2004-focal-v20200506"
-export PRIOR_UBUNTU_BASE_IMAGE="ubuntu-1910-eoan-v20200211"
-# Manually produced base-image names (see $SCRIPT_BASE/README.md)
-export FEDORA_BASE_IMAGE="fedora-cloud-base-32-1-6-1588257430"
-export PRIOR_FEDORA_BASE_IMAGE="fedora-cloud-base-31-1-9-1588257430"
-export BUILT_IMAGE_SUFFIX="${BUILT_IMAGE_SUFFIX:--$CIRRUS_REPO_NAME-${CIRRUS_BUILD_ID}}"
+
+OS_RELEASE_ID="$(source /etc/os-release; echo $ID)"
+# GCE image-name compatible string representation of distribution _major_ version
+OS_RELEASE_VER="$(source /etc/os-release; echo $VERSION_ID | cut -d '.' -f 1)"
+# Combined to ease some usage
+OS_REL_VER="${OS_RELEASE_ID}-${OS_RELEASE_VER}"
+
# IN_PODMAN container image
-IN_PODMAN_IMAGE="quay.io/libpod/in_podman:$DEST_BRANCH"
+IN_PODMAN_IMAGE="quay.io/libpod/${OS_RELEASE_ID}_podman:$_BUILT_IMAGE_SUFFIX"
# Image for uploading releases
UPLDREL_IMAGE="quay.io/libpod/upldrel:master"
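
A quick sanity check of the values the relocated block derives (output is what a Fedora 32 VM would be expected to print; sketch only):

```
source contrib/cirrus/lib.sh
echo "$OS_RELEASE_ID / $OS_RELEASE_VER / $OS_REL_VER"
# fedora / 32 / fedora-32
```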
@@ -98,7 +89,7 @@ BIGTO="timeout_attempt_delay_command 300s 5 60s"
# Safe env. vars. to transfer from root -> $ROOTLESS_USER (go env handled separately)
ROOTLESS_ENV_RE='(CIRRUS_.+)|(ROOTLESS_.+)|(.+_IMAGE.*)|(.+_BASE)|(.*DIRPATH)|(.*FILEPATH)|(SOURCE.*)|(DEPEND.*)|(.+_DEPS_.+)|(OS_REL.*)|(.+_ENV_RE)|(TRAVIS)|(CI.+)|(REMOTE.*)'
# Unsafe env. vars for display
-SECRET_ENV_RE='(IRCID)|(ACCOUNT)|(GC[EP]..+)|(SSH)'
+SECRET_ENV_RE='(ACCOUNT)|(GC[EP]..+)|(SSH)'
SPECIALMODE="${SPECIALMODE:-none}"
RCLI="${RCLI:-false}"
@@ -111,22 +102,9 @@ then
else
ROOTLESS_USER="${ROOTLESS_USER:-$USER}"
fi
-
-# GCE image-name compatible string representation of distribution name
-OS_RELEASE_ID="$(source /etc/os-release; echo $ID)"
-# GCE image-name compatible string representation of distribution _major_ version
-OS_RELEASE_VER="$(source /etc/os-release; echo $VERSION_ID | cut -d '.' -f 1)"
-# Combined to ease some usage
-OS_REL_VER="${OS_RELEASE_ID}-${OS_RELEASE_VER}"
# Type of filesystem used for cgroups
CG_FS_TYPE="$(stat -f -c %T /sys/fs/cgroup)"
-# When building images, the version of automation tooling to install
-INSTALL_AUTOMATION_VERSION=1.1.3
-
-# Installed into cache-images, supports overrides
-# by user-data in case of breakage or for debugging.
-CUSTOM_CLOUD_CONFIG_DEFAULTS="$GOSRC/$PACKER_BASE/cloud-init/$OS_RELEASE_ID/cloud.cfg.d"
# Pass in a list of one or more envariable names; exit non-zero with
# helpful error message if any value is empty
req_env_var() {
@@ -237,67 +215,6 @@ timeout_attempt_delay_command() {
fi
}
-ircmsg() {
- req_env_var CIRRUS_TASK_ID IRCID
- [[ -n "$*" ]] || die 9 "ircmsg() invoked without message text argument"
- # Sometimes setup_environment.sh didn't run
- SCRIPT="$(dirname $0)/podbot.py"
- NICK="podbot_$CIRRUS_TASK_ID"
- NICK="${NICK:0:15}" # Any longer will break things
- set +e
- $SCRIPT $NICK $@
- echo "Ignoring exit($?)"
- set -e
-}
-
-# This covers all possible human & CI workflow parallel & serial combinations
-# where at least one caller must definitively discover if within a commit range
-# there is at least one release tag not having any '-' characters (return 0)
-# or otherwise (return non-0).
-is_release() {
- unset RELVER
- local ret
- req_env_var CIRRUS_CHANGE_IN_REPO
- if [[ -n "$CIRRUS_TAG" ]]; then
- RELVER="$CIRRUS_TAG"
- elif [[ ! "$CIRRUS_BASE_SHA" =~ "unknown" ]]
- then
- # Normally not possible for this to be empty, except when unittesting.
- req_env_var CIRRUS_BASE_SHA
- local range="${CIRRUS_BASE_SHA}..${CIRRUS_CHANGE_IN_REPO}"
- if echo "${range}$CIRRUS_TAG" | grep -iq 'unknown'; then
- die 11 "is_release() unusable range ${range} or tag $CIRRUS_TAG"
- fi
-
- if type -P git &> /dev/null
- then
- git fetch --all --tags &> /dev/null|| \
- die 12 "is_release() failed to fetch tags"
- RELVER=$(git log --pretty='format:%d' $range | \
- grep '(tag:' | sed -r -e 's/\s+[(]tag:\s+(v[0-9].*)[)]/\1/' | \
- sort -uV | tail -1)
- ret=$?
- else
- warn -1 "Git command not found while checking for release"
- ret="-1"
- fi
- [[ "$ret" -eq "0" ]] || \
- die 13 "is_release() failed to parse tags"
- else # Not testing a PR, but neither CIRRUS_BASE_SHA or CIRRUS_TAG are set
- return 1
- fi
- if [[ -n "$RELVER" ]]; then
- echo "Found \$RELVER $RELVER"
- if echo "$RELVER" | grep -q '-'; then
- return 2 # development tag
- else
- return 0
- fi
- else
- return 1 # not a release
- fi
-}
-
setup_rootless() {
req_env_var ROOTLESS_USER GOPATH GOSRC SECRET_ENV_RE ROOTLESS_ENV_RE
@@ -369,20 +286,6 @@ setup_rootless() {
die 11 "Timeout exceeded waiting for localhost ssh capability"
}
-# Grab a newer version of git from software collections
-# https://www.softwarecollections.org/en/
-# and use it with a wrapper
-install_scl_git() {
- echo "Installing SoftwareCollections updated 'git' version."
- ooe.sh $SUDO yum -y install rh-git29
- cat << "EOF" | $SUDO tee /usr/bin/git
-#!/usr/bin/env bash
-
-scl enable rh-git29 -- git $@
-EOF
- $SUDO chmod 755 /usr/bin/git
-}
-
install_test_configs() {
echo "Installing cni config, policy and registry config"
req_env_var GOSRC SCRIPT_BASE
@@ -457,66 +360,3 @@ $FEDORA_BASE_IMAGE
$PRIOR_FEDORA_BASE_IMAGE
"
}
-
-systemd_banish() {
- $GOSRC/$PACKER_BASE/systemd_banish.sh
-}
-
-# This can be removed when the kernel bug fix is included in Fedora
-workaround_bfq_bug() {
- if [[ "$OS_RELEASE_ID" == "fedora" ]] && [[ $OS_RELEASE_VER -le 32 ]]; then
- warn "Switching io scheduler to 'deadline' to avoid RHBZ 1767539"
- warn "aka https://bugzilla.kernel.org/show_bug.cgi?id=205447"
- echo "mq-deadline" | sudo tee /sys/block/sda/queue/scheduler > /dev/null
- echo -n "IO Scheduler set to: "
- $SUDO cat /sys/block/sda/queue/scheduler
- fi
-}
-
-# Warning: DO NOT USE.
-# This is called by other functions as the very last step during the VM Image build
-# process. Its purpose is to "reset" the image, so all the first-boot operations
-# happen at test runtime (like generating new ssh host keys, resizing partitions, etc.)
-_finalize() {
- set +e # Don't fail at the very end
- if [[ -d "$CUSTOM_CLOUD_CONFIG_DEFAULTS" ]]
- then
- echo "Installing custom cloud-init defaults"
- $SUDO cp -v "$CUSTOM_CLOUD_CONFIG_DEFAULTS"/* /etc/cloud/cloud.cfg.d/
- else
- echo "Could not find any files in $CUSTOM_CLOUD_CONFIG_DEFAULTS"
- fi
- echo "Re-initializing so next boot does 'first-boot' setup again."
- cd /
- $SUDO rm -rf $GOPATH/src # Actual source will be cloned at runtime
- $SUDO rm -rf /var/lib/cloud/instanc*
- $SUDO rm -rf /root/.ssh/*
- $SUDO rm -rf /etc/ssh/*key*
- $SUDO rm -rf /etc/ssh/moduli
- $SUDO rm -rf /home/*
- $SUDO rm -rf /tmp/*
- $SUDO rm -rf /tmp/.??*
- $SUDO sync
- $SUDO fstrim -av
-}
-
-# Called during VM Image setup, not intended for general use.
-rh_finalize() {
- set +e # Don't fail at the very end
- echo "Resetting to fresh-state for usage as cloud-image."
- PKG=$(type -P dnf || type -P yum || echo "")
- $SUDO $PKG clean all
- $SUDO rm -rf /var/cache/{yum,dnf}
- $SUDO rm -f /etc/udev/rules.d/*-persistent-*.rules
- $SUDO touch /.unconfigured # force firstboot to run
- _finalize
-}
-
-# Called during VM Image setup, not intended for general use.
-ubuntu_finalize() {
- set +e # Don't fail at the very end
- echo "Resetting to fresh-state for usage as cloud-image."
- $LILTO $SUDOAPTGET autoremove
- $SUDO rm -rf /var/cache/apt
- _finalize
-}
diff --git a/contrib/cirrus/lib.sh.t b/contrib/cirrus/lib.sh.t
index 204af1245..643b5513d 100755
--- a/contrib/cirrus/lib.sh.t
+++ b/contrib/cirrus/lib.sh.t
@@ -84,7 +84,7 @@ BAR=1
test_rev "FOO BAR" 0 ''
###############################################################################
-# tests for test_okay()
+# tests for item_test()
function test_item_test {
local exp_msg=$1
@@ -118,46 +118,4 @@ test_item_test "ok okay enough" 0 "okay enough" "line 1
line2" "=" "line 1
line2"
-###############################################################################
-# tests for is_release()
-
-# N/B: Assuming tests run in their own process, so wiping out the local
-# CIRRUS_BASE_SHA CIRRUS_CHANGE_IN_REPO and CIRRUS_TAG will be okay.
-function test_is_release() {
- CIRRUS_BASE_SHA="$1"
- CIRRUS_CHANGE_IN_REPO="$2"
- CIRRUS_TAG="$3"
- local exp_status=$4
- local exp_msg=$5
- local msg
- msg=$(is_release)
- local status=$?
-
- check_result "$msg" "$exp_msg" "is_release(CIRRUS_BASE_SHA='$1' CIRRUS_CHANGE_IN_REPO='$2' CIRRUS_TAG='$3')"
- check_result "$status" "$exp_status" "is_release(...) returned $status"
-}
-
-# FROM TO TAG RET MSG
-test_is_release "" "" "" "9" "FATAL: is_release() requires \$CIRRUS_CHANGE_IN_REPO to be non-empty"
-test_is_release "x" "" "" "9" "FATAL: is_release() requires \$CIRRUS_CHANGE_IN_REPO to be non-empty"
-
-# post-merge / tag-push testing, FROM will be set 'unknown' by (lib.sh default)
-test_is_release "unknown" "x" "" "1" ""
-# post-merge / tag-push testing, oddball tag is set, FROM will be set 'unknown'
-test_is_release "unknown" "unknown" "test-tag" "2" "Found \$RELVER test-tag"
-# post-merge / tag-push testing, sane tag is set, FROM will be set 'unknown'
-test_is_release "unknown" "unknown" "0.0.0" "0" "Found \$RELVER 0.0.0"
-# hack/get_ci_vm or PR testing, FROM and TO are set, no tag is set
-test_is_release "x" "x" "" "1" ""
-
-# Negative-testing git with this function is very difficult, assume git works
-# test_is_release ... "is_release() failed to fetch tags"
-# test_is_release ... "is_release() failed to parse tags"
-
-BF_V1=$(git rev-parse v1.0.0^)
-AT_V1=$(git rev-parse v1.0.0)
-test_is_release "$BF_V1" "$BF_V1" "v9.8.7-dev" "2" "Found \$RELVER v9.8.7-dev"
-test_is_release "$BF_V1" "$AT_V1" "v9.8.7-dev" "2" "Found \$RELVER v9.8.7-dev"
-test_is_release "$BF_V1" "$AT_V1" "" "0" "Found \$RELVER v1.0.0"
-
exit $rc
diff --git a/contrib/cirrus/notice_branch_failure.sh b/contrib/cirrus/notice_branch_failure.sh
deleted file mode 100755
index b810bd266..000000000
--- a/contrib/cirrus/notice_branch_failure.sh
+++ /dev/null
@@ -1,19 +0,0 @@
-#!/usr/bin/env bash
-
-set -e
-
-source $(dirname $0)/lib.sh
-
-# mIRC "escape" codes are the most standard, for a non-standard client-side interpretation.
-ETX="$(echo -n -e '\x03')"
-RED="${ETX}4"
-NOR="$(echo -n -e '\x0f')"
-
-if [[ "$CIRRUS_BRANCH" = "$DEST_BRANCH" ]]
-then
- BURL="https://cirrus-ci.com/build/$CIRRUS_BUILD_ID"
- ircmsg "${RED}[Action Recommended]: ${NOR}Post-merge testing on ${RED}$CIRRUS_BRANCH failed${NOR} in $CIRRUS_TASK_NAME on ${OS_RELEASE_ID}-${OS_RELEASE_VER}: $BURL. Please investigate, and re-run if appropriate."
-fi
-
-# This script assumed to be executed on failure
-die 1 "Testing Failed"
diff --git a/contrib/cirrus/packer/.gitignore b/contrib/cirrus/packer/.gitignore
deleted file mode 100644
index 8f7bdeaf7..000000000
--- a/contrib/cirrus/packer/.gitignore
+++ /dev/null
@@ -1,7 +0,0 @@
-*json
-packer
-packer*zip
-packer_cache
-cidata*
-meta-data
-user-data
diff --git a/contrib/cirrus/packer/Makefile b/contrib/cirrus/packer/Makefile
deleted file mode 100644
index c5a8e4cac..000000000
--- a/contrib/cirrus/packer/Makefile
+++ /dev/null
@@ -1,94 +0,0 @@
-PACKER_VER ?= 1.4.3
-GOARCH=$(shell go env GOARCH)
-ARCH=$(shell uname -m)
-PACKER_DIST_FILENAME := packer_${PACKER_VER}_linux_${GOARCH}.zip
-
-# Only needed for libpod_base_images target
-TIMESTAMP := $(shell date +%s)
-GOPATH ?= /var/tmp/go
-GOSRC ?= $(GOPATH)/src/github.com/containers/libpod
-PACKER_BASE ?= contrib/cirrus/packer
-SCRIPT_BASE ?= contrib/cirrus
-POST_MERGE_BUCKET_SUFFIX ?=
-
-UBUNTU_BASE_IMAGE = $(shell source ../lib.sh && echo "$$UBUNTU_BASE_IMAGE")
-PRIOR_UBUNTU_BASE_IMAGE = $(shell source ../lib.sh && echo "$$PRIOR_UBUNTU_BASE_IMAGE")
-
-# For debugging nested-virt, use
-#TTYDEV := $(shell tty)
-TTYDEV := /dev/null
-
-.PHONY: all
-all: libpod_images
-
-# Utility target for checking required parameters
-.PHONY: guard-%
-guard-%:
- @if [[ -z "$($*)" ]]; then \
- echo "Missing or empty required make variable '$*'."; \
- exit 1; \
- fi;
-
-%.json: %.yml
- @python3 -c 'import json,yaml; json.dump( yaml.safe_load(open("$<").read()), open("$@","w"), indent=2);'
-
-${PACKER_DIST_FILENAME}:
- @curl -L --silent --show-error \
- -O https://releases.hashicorp.com/packer/${PACKER_VER}/${PACKER_DIST_FILENAME}
-
-packer: ${PACKER_DIST_FILENAME}
- @curl -L --silent --show-error \
- https://releases.hashicorp.com/packer/${PACKER_VER}/packer_${PACKER_VER}_SHA256SUMS \
- | grep linux_${GOARCH} > /tmp/packer_sha256sums
- @sha256sum --check /tmp/packer_sha256sums
- @unzip -o ${PACKER_DIST_FILENAME}
- @touch --reference=Makefile ${PACKER_DIST_FILENAME}
-
-.PHONY: test
-test: libpod_base_images.json libpod_images.json packer
- ./packer inspect libpod_base_images.json > /dev/null
- ./packer inspect libpod_images.json > /dev/null
- @echo "All good"
-
-.PHONY: libpod_images
-libpod_images: guard-PACKER_BUILDS libpod_images.json packer
- ./packer build \
- -force \
- $(shell test -z "${PACKER_BUILDS}" || echo "-only=${PACKER_BUILDS}") \
- -var GOPATH=$(GOPATH) \
- -var GOSRC=$(GOSRC) \
- -var PACKER_BASE=$(PACKER_BASE) \
- -var SCRIPT_BASE=$(SCRIPT_BASE) \
- libpod_images.json
-
-cidata.ssh:
- ssh-keygen -f $@ -P "" -q
-
-cidata.ssh.pub: cidata.ssh
- touch $@
-
-meta-data:
- echo "local-hostname: localhost.localdomain" > $@
-
-user-data: cidata.ssh.pub
- bash make-user-data.sh
-
-cidata.iso: user-data meta-data
- genisoimage -output cidata.iso -volid cidata -input-charset utf-8 -joliet -rock user-data meta-data
-
-# This is intended to be run by a human, with admin access to the libpod GCE project.
-.PHONY: libpod_base_images
-libpod_base_images: guard-GCP_PROJECT_ID guard-GOOGLE_APPLICATION_CREDENTIALS libpod_base_images.json cidata.iso cidata.ssh packer
- PACKER_CACHE_DIR=/tmp ./packer build \
- $(shell test -z "${PACKER_BUILDS}" || echo "-only=${PACKER_BUILDS}") \
- -force \
- -var TIMESTAMP=$(TIMESTAMP) \
- -var TTYDEV=$(TTYDEV) \
- -var GCP_PROJECT_ID=$(GCP_PROJECT_ID) \
- -var GOOGLE_APPLICATION_CREDENTIALS=$(GOOGLE_APPLICATION_CREDENTIALS) \
- -var GOSRC=$(GOSRC) \
- -var PACKER_BASE=$(PACKER_BASE) \
- -var SCRIPT_BASE=$(SCRIPT_BASE) \
- -var UBUNTU_BASE_IMAGE=$(UBUNTU_BASE_IMAGE) \
- -var PRIOR_UBUNTU_BASE_IMAGE=$(PRIOR_UBUNTU_BASE_IMAGE) \
- libpod_base_images.json
diff --git a/contrib/cirrus/packer/README.how-to-update-cirrus-vms b/contrib/cirrus/packer/README.how-to-update-cirrus-vms
deleted file mode 100644
index ac2902ffb..000000000
--- a/contrib/cirrus/packer/README.how-to-update-cirrus-vms
+++ /dev/null
@@ -1,89 +0,0 @@
-This document briefly describes how to update VMs on Cirrus.
-
-Examples of when you need to do this:
-
- - to update crun, conmon, or some other package(s)
- - to add and/or remove an OS (eg drop f31, add f33)
- - to change system config (eg containers.conf or other /etc files)
- - to change kernel command-line (boot time) options
-
-This is a TWO-STEP process: you need to submit a PR with a magic [CI:IMG]
-description string, wait for it to finish, grab a magic string from the
-results, then resubmit without [CI:IMG].
-
-Procedure, Part One of Two:
-
- 1) Create a working branch:
-
- $ git co -b my_branch_name
-
- 2) Make your changes. Typically, zero or more of the following files:
-
- .cirrus.yml
- contrib/cirrus/packer/*_packaging.sh
-
- I said zero because sometimes you just want to update VMs
- with the latest in dnf or ubuntu repos. That doesn't require
- changing anything here, simply running new dnf/apt installs.
-
- 3) Commit your changes. Be sure to include the magic [CI:IMG] string:
-
- $ git commit -asm'[CI:IMG] this is my commit message'
-
- 4) Submit your PR:
-
- $ gh pr create --fill --web
-
-
- -------------------------- INTERMISSION --------------------------
- ...in which we wait for CI to turn green. In particular, although
- we only really need 'test_build_cache_images' (45 minutes or so)
- to get the required magic number strings, please be a decent
- human being and wait for 'verify_test_built_images' (another hour)
- so we can all have confidence in our process. Thank you.
- -------------------------- INTERMISSION --------------------------
-
-
-Procedure, Part Two of Two:
-
- 1) When 'test_build_cache_images' completes, click it, then click
- 'View more details on Cirrus CI', then expand the 'Run build_vm_image'
- accordion. This gives you a garishly colorful display of lines.
- Each color is a different VM.
-
- 2) Verify that each VM has the packages you require. (The garish log
- doesn't actually list this for all packages, so you may need to
- look in the 'verify_test_built_images' log for each individual
- VM. Click the 'package_versions' accordion.)
-
- 3) At the bottom of this log you will see a block like:
-
- Builds finished. The artifacts of successful builds are:
- ubuntu-19: A disk image was created: ubuntu-19-podman-6439450735542272
- fedora-31: A disk image was created: fedora-31-podman-6439450735542272
- .....
-
- The long numbers at the end should (MUST!) be all identical.
-
- 4) Edit .cirrus.yml locally. Find '_BUILT_IMAGE_SUFFIX' near the
- top. Copy that long number ("6439450735542272", above) and paste
- it here, replacing the previous long number.
-
- 5) Wait for CI to turn green. I know you might have skipped that,
- because 'test_build_cache_images' finishes long before 'verify',
- and maybe you're in a hurry, but come on. Be responsible.
-
- 6) Edit the PR description in github: remove '[CI:IMG]' from the
- title. Again, *in github*, in the web UI, use the 'Edit' button
- at top right next to the PR title. Remove the '[CI:IMG]' string
- from the PR title, press Save. If you forget to do this, the
- VM-building steps will run again (taking a long time) but it
- will be a waste of time.
-
- 7) Update your PR:
-
- $ git add .cirrus.yml (to get the new magic IMAGE_SUFFIX string)
- $ git commit --amend (remove [CI:IMG] for consistency with 6)
- $ git push --force
-
-You can probably take it from here.
diff --git a/contrib/cirrus/packer/README.md b/contrib/cirrus/packer/README.md
deleted file mode 100644
index 9a07ed960..000000000
--- a/contrib/cirrus/packer/README.md
+++ /dev/null
@@ -1,3 +0,0 @@
-These are definitions and scripts consumed by packer to produce the
-various distribution images used for CI testing. For more details
-see the [Cirrus CI documentation](../README.md)
diff --git a/contrib/cirrus/packer/cloud-init/fedora/cloud-init.service b/contrib/cirrus/packer/cloud-init/fedora/cloud-init.service
deleted file mode 100644
index 4d2197d87..000000000
--- a/contrib/cirrus/packer/cloud-init/fedora/cloud-init.service
+++ /dev/null
@@ -1,20 +0,0 @@
-[Unit]
-Description=Initial cloud-init job (metadata service crawler)
-DefaultDependencies=no
-Wants=cloud-init-local.service
-After=cloud-init-local.service
-Wants=google-network-daemon.service
-After=google-network-daemon.service
-Before=systemd-user-sessions.service
-
-[Service]
-Type=oneshot
-ExecStart=/usr/bin/cloud-init init
-RemainAfterExit=yes
-TimeoutSec=0
-
-# Output needs to appear in instance console output
-StandardOutput=journal+console
-
-[Install]
-WantedBy=cloud-init.target
diff --git a/contrib/cirrus/packer/cloud-init/fedora/cloud.cfg.d/40_enable_root.cfg b/contrib/cirrus/packer/cloud-init/fedora/cloud.cfg.d/40_enable_root.cfg
deleted file mode 100644
index 672d1907b..000000000
--- a/contrib/cirrus/packer/cloud-init/fedora/cloud.cfg.d/40_enable_root.cfg
+++ /dev/null
@@ -1 +0,0 @@
-disable_root: 0
diff --git a/contrib/cirrus/packer/cloud-init/fedora/cloud.cfg.d/50_custom_disk_setup.cfg b/contrib/cirrus/packer/cloud-init/fedora/cloud.cfg.d/50_custom_disk_setup.cfg
deleted file mode 100644
index c0fdf0e23..000000000
--- a/contrib/cirrus/packer/cloud-init/fedora/cloud.cfg.d/50_custom_disk_setup.cfg
+++ /dev/null
@@ -1,4 +0,0 @@
-#cloud-config
-growpart:
- mode: false
-resize_rootfs: false
diff --git a/contrib/cirrus/packer/cloud-init/ubuntu/cloud.cfg.d/40_enable_root.cfg b/contrib/cirrus/packer/cloud-init/ubuntu/cloud.cfg.d/40_enable_root.cfg
deleted file mode 100644
index 672d1907b..000000000
--- a/contrib/cirrus/packer/cloud-init/ubuntu/cloud.cfg.d/40_enable_root.cfg
+++ /dev/null
@@ -1 +0,0 @@
-disable_root: 0
diff --git a/contrib/cirrus/packer/fedora_base-setup.sh b/contrib/cirrus/packer/fedora_base-setup.sh
deleted file mode 100644
index bf29a1aec..000000000
--- a/contrib/cirrus/packer/fedora_base-setup.sh
+++ /dev/null
@@ -1,44 +0,0 @@
-#!/usr/bin/env bash
-
-# N/B: This script is not intended to be run by humans. It is used to configure the
-# fedora base image for importing, so that it will boot in GCE
-
-set -e
-
-# Load in library (copied by packer, before this script was run)
-source $GOSRC/$SCRIPT_BASE/lib.sh
-
-echo "Updating packages"
-dnf -y update
-
-echo "Installing necessary packages and google services"
-dnf -y install rng-tools google-compute-engine-tools google-compute-engine-oslogin ethtool
-
-echo "Enabling services"
-systemctl enable rngd
-
-# There is a race that can happen on boot between the GCE services configuring
-# the VM, and cloud-init trying to do similar activities. Use a customized
-# unit file to make sure cloud-init starts after the google-compute-* services.
-echo "Setting cloud-init service to start after google-network-daemon.service"
-cp -v $GOSRC/$PACKER_BASE/cloud-init/fedora/cloud-init.service /etc/systemd/system/
-
-# ref: https://cloud.google.com/compute/docs/startupscript
-# The mechanism used by Cirrus-CI to execute tasks on the system is through an
-# "agent" process launched as a GCP startup-script (from the metadata service).
-# This agent is responsible for cloning the repository and executing all task
-# scripts and other operations. Therefore, on SELinux-enforcing systems, the
-# service must be labeled properly to ensure its child processes can
-# run with the proper contexts.
-METADATA_SERVICE_CTX=unconfined_u:unconfined_r:unconfined_t:s0
-METADATA_SERVICE_PATH=systemd/system/google-startup-scripts.service
-sed -r -e \
- "s/Type=oneshot/Type=oneshot\nSELinuxContext=$METADATA_SERVICE_CTX/" \
- /lib/$METADATA_SERVICE_PATH > /etc/$METADATA_SERVICE_PATH
-
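
A quick way to confirm by hand that the relabeled copy took effect (an editor's sketch, not part of the script):

```
grep -H 'SELinuxContext' /etc/systemd/system/google-startup-scripts.service
```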
-# Ensure there are no disruptive periodic services enabled by default in image
-systemd_banish
-
-rh_finalize
-
-echo "SUCCESS!"
diff --git a/contrib/cirrus/packer/fedora_packaging.sh b/contrib/cirrus/packer/fedora_packaging.sh
deleted file mode 100644
index fcf9eb93f..000000000
--- a/contrib/cirrus/packer/fedora_packaging.sh
+++ /dev/null
@@ -1,194 +0,0 @@
-#!/usr/bin/env bash
-
-# This script is called from fedora_setup.sh and various Dockerfiles.
-# It's not intended to be used outside of those contexts. It assumes the lib.sh
-# library has already been sourced, and that all "ground-up" package-related activity
-# needs to be done, including repository setup and initial update.
-
-set -e
-
-echo "Updating/Installing repos and packages for $OS_REL_VER"
-
-source $GOSRC/$SCRIPT_BASE/lib.sh
-
-req_env_var GOSRC SCRIPT_BASE BIGTO INSTALL_AUTOMATION_VERSION FEDORA_BASE_IMAGE PRIOR_FEDORA_BASE_IMAGE
-
-# Pre-req. to install automation tooling
-$LILTO $SUDO dnf install -y git
-
-# Install common automation tooling (i.e. ooe.sh)
-curl --silent --show-error --location \
- --url "https://raw.githubusercontent.com/containers/automation/master/bin/install_automation.sh" | \
- $SUDO env INSTALL_PREFIX=/usr/share /bin/bash -s - "$INSTALL_AUTOMATION_VERSION"
-# Reload installed environment right now (happens automatically in a new process)
-source /usr/share/automation/environment
-
-# Set this to 1 to NOT enable updates-testing repository
-DISABLE_UPDATES_TESTING=${DISABLE_UPDATES_TESTING:-0}
-
-# Do not enable updates-testing on the previous Fedora release
-if ((DISABLE_UPDATES_TESTING!=0)); then
- warn "NOT enabling updates-testing repository for image based on $PRIOR_FEDORA_BASE_IMAGE"
-else
- warn "Enabling updates-testing repository for image based on $FEDORA_BASE_IMAGE"
- $LILTO $SUDO ooe.sh dnf install -y 'dnf-command(config-manager)'
- $LILTO $SUDO ooe.sh dnf config-manager --set-enabled updates-testing
-fi
-
-$BIGTO ooe.sh $SUDO dnf update -y
-
-# Fedora, as of 31, uses cgroups v2 by default. runc does not support
-# cgroups v2, only crun does. (As of 2020-07-30 runc support is
-# forthcoming but not even close to ready yet). To ensure a reliable
-# runtime environment, force-remove runc if it is present.
-# However, because a few other repos. which use these images still need
-# it, ensure the runc package is cached in $PACKAGE_DOWNLOAD_DIR so
-# it may be swapped in when required.
-REMOVE_PACKAGES=(runc)
-
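
A sketch of how a consumer of these images could later swap the cached runc back in (`$PACKAGE_DOWNLOAD_DIR` is `/var/cache/download` per `lib.sh` above; the actual consumer scripts live outside this diff):

```
sudo dnf install -y --allowerasing /var/cache/download/runc-*.rpm
```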
-INSTALL_PACKAGES=(\
- autoconf
- automake
- bash-completion
- bats
- bridge-utils
- btrfs-progs-devel
- buildah
- bzip2
- conmon
- container-selinux
- containernetworking-plugins
- containers-common
- criu
- crun
- curl
- device-mapper-devel
- dnsmasq
- e2fsprogs-devel
- emacs-nox
- file
- findutils
- fuse3
- fuse3-devel
- gcc
- git
- glib2-devel
- glibc-devel
- glibc-static
- gnupg
- go-md2man
- golang
- gpgme
- gpgme-devel
- grubby
- hostname
- httpd-tools
- iproute
- iptables
- jq
- krb5-workstation
- libassuan
- libassuan-devel
- libblkid-devel
- libcap-devel
- libffi-devel
- libgpg-error-devel
- libguestfs-tools
- libmsi1
- libnet
- libnet-devel
- libnl3-devel
- libseccomp
- libseccomp-devel
- libselinux-devel
- libtool
- libvarlink-util
- libxml2-devel
- libxslt-devel
- lsof
- make
- mlocate
- msitools
- nfs-utils
- nmap-ncat
- openssl
- openssl-devel
- ostree-devel
- pandoc
- pkgconfig
- podman
- policycoreutils
- procps-ng
- protobuf
- protobuf-c
- protobuf-c-devel
- protobuf-devel
- python2
- python3-PyYAML
- python3-dateutil
- python3-libselinux
- python3-libsemanage
- python3-libvirt
- python3-psutil
- python3-pytoml
- python3-requests
- redhat-rpm-config
- rpcbind
- rsync
- sed
- selinux-policy-devel
- skopeo
- skopeo-containers
- slirp4netns
- socat
- tar
- unzip
- vim
- wget
- which
- xz
- zip
- zlib-devel
-)
-DOWNLOAD_PACKAGES=(\
- "cri-o-$(get_kubernetes_version)*"
- cri-tools
- "kubernetes-$(get_kubernetes_version)*"
- runc
- oci-umount
- parallel
-)
-
-echo "Installing general build/test dependencies for Fedora '$OS_RELEASE_VER'"
-$BIGTO ooe.sh $SUDO dnf install -y ${INSTALL_PACKAGES[@]}
-
-# AD-HOC CODE FOR SPECIAL-CASE SITUATIONS!
-# On 2020-07-23 we needed this code to upgrade crun on f31, a build
-# that is not yet in stable. Since CI:IMG PRs are a two-step process,
-# the key part is that we UN-COMMENT-THIS-OUT during the first step,
-# then re-comment it on the second (once we have the built images).
-# That way this will be dead code in future CI:IMG PRs but will
-# serve as an example for anyone in a similar future situation.
-# $BIGTO ooe.sh $SUDO dnf --enablerepo=updates-testing -y upgrade crun
-
-[[ ${#REMOVE_PACKAGES[@]} -eq 0 ]] || \
- $LILTO ooe.sh $SUDO dnf erase -y "${REMOVE_PACKAGES[@]}"
-
-if [[ ${#DOWNLOAD_PACKAGES[@]} -gt 0 ]]; then
- echo "Downloading packages for optional installation at runtime, as needed."
- # Required for cri-o
- ooe.sh $SUDO dnf -y module enable cri-o:$(get_kubernetes_version)
- $SUDO mkdir -p "$PACKAGE_DOWNLOAD_DIR"
- cd "$PACKAGE_DOWNLOAD_DIR"
- $LILTO ooe.sh $SUDO dnf download -y --resolve "${DOWNLOAD_PACKAGES[@]}"
-fi
-
-echo "Installing runtime tooling"
-# Save some runtime by having these already available
-cd $GOSRC
-# Required since initially go was not installed
-source $GOSRC/$SCRIPT_BASE/lib.sh
-echo "Go environment has been setup:"
-go env
-$SUDO make install.tools
-$SUDO $GOSRC/hack/install_catatonit.sh
diff --git a/contrib/cirrus/packer/fedora_setup.sh b/contrib/cirrus/packer/fedora_setup.sh
deleted file mode 100644
index 16ae87d8a..000000000
--- a/contrib/cirrus/packer/fedora_setup.sh
+++ /dev/null
@@ -1,34 +0,0 @@
-#!/usr/bin/env bash
-
-# This script is called by packer on the subject fedora VM, to set up the podman
-# build/test environment. It's not intended to be used outside of this context.
-
-set -e
-
-# Load in library (copied by packer, before this script was run)
-source $GOSRC/$SCRIPT_BASE/lib.sh
-
-req_env_var SCRIPT_BASE PACKER_BASE INSTALL_AUTOMATION_VERSION PACKER_BUILDER_NAME GOSRC FEDORA_BASE_IMAGE OS_RELEASE_ID OS_RELEASE_VER
-
-workaround_bfq_bug
-
-# Do not enable updates-testing on the previous Fedora release
-if [[ "$PRIOR_FEDORA_BASE_IMAGE" =~ "${OS_RELEASE_ID}-cloud-base-${OS_RELEASE_VER}" ]]; then
- DISABLE_UPDATES_TESTING=1
-else
- DISABLE_UPDATES_TESTING=0
-fi
-
-bash $PACKER_BASE/fedora_packaging.sh
-# Load installed environment right now (happens automatically in a new process)
-source /usr/share/automation/environment
-
-echo "Enabling cgroup management from containers"
-ooe.sh sudo setsebool container_manage_cgroup true
-
-# Ensure there are no disruptive periodic services enabled by default in image
-systemd_banish
-
-rh_finalize
-
-echo "SUCCESS!"
diff --git a/contrib/cirrus/packer/image-builder-image_base-setup.sh b/contrib/cirrus/packer/image-builder-image_base-setup.sh
deleted file mode 100644
index 26fbe2903..000000000
--- a/contrib/cirrus/packer/image-builder-image_base-setup.sh
+++ /dev/null
@@ -1,71 +0,0 @@
-#!/usr/bin/env bash
-
-# This script is called by packer on a vanilla CentOS VM, to set up the image
-# used for building images FROM base images. It's not intended to be used
-# outside of this context.
-
-set -e
-
-[[ "$1" == "post" ]] || exit 0 # pre stage not needed
-
-# Load in library (copied by packer, before this script was run)
-source $GOSRC/$SCRIPT_BASE/lib.sh
-
-req_env_var TIMESTAMP GOSRC SCRIPT_BASE PACKER_BASE
-
-install_ooe
-
-echo "Updating packages"
-ooe.sh sudo yum -y update
-
-echo "Configuring repositories"
-ooe.sh sudo yum -y install centos-release-scl epel-release
-
-echo "Installing packages"
-ooe.sh sudo yum -y install \
- genisoimage \
- golang \
- google-cloud-sdk \
- libvirt \
- libvirt-admin \
- libvirt-client \
- libvirt-daemon \
- make \
- python36 \
- python36-PyYAML \
- qemu-img \
- qemu-kvm \
- qemu-kvm-tools \
- qemu-user \
- rsync \
- rng-tools \
- unzip \
- util-linux \
- vim
-
-sudo systemctl enable rngd
-
-sudo ln -s /usr/libexec/qemu-kvm /usr/bin/
-
-sudo tee /etc/modprobe.d/kvm-nested.conf <<EOF
-options kvm-intel nested=1
-options kvm-intel enable_shadow_vmcs=1
-options kvm-intel enable_apicv=1
-options kvm-intel ept=1
-EOF
-
-echo "Installing packer"
-sudo mkdir -p /root/$(basename $PACKER_BASE)
-sudo cp $GOSRC/$PACKER_BASE/*packer* /root/$(basename $PACKER_BASE)
-sudo mkdir -p /root/$(basename $SCRIPT_BASE)
-sudo cp $GOSRC/$SCRIPT_BASE/*.sh /root/$(basename $SCRIPT_BASE)
-
-install_scl_git
-
-echo "Cleaning up"
-cd /
-rm -rf $GOSRC
-
-rh_finalize
-
-echo "SUCCESS!"
diff --git a/contrib/cirrus/packer/libpod_base_images.yml b/contrib/cirrus/packer/libpod_base_images.yml
deleted file mode 100644
index f53bfafc5..000000000
--- a/contrib/cirrus/packer/libpod_base_images.yml
+++ /dev/null
@@ -1,164 +0,0 @@
----
-
-variables:
- # Complete local path to this repository (Required)
- GOSRC:
- # Relative path to this (packer) subdirectory (Required)
- PACKER_BASE:
- # Relative path to cirrus scripts subdirectory (Required)
- SCRIPT_BASE:
- # Unique ID for naming new base-images (required)
- TIMESTAMP:
- # Required for output from qemu builders
- TTYDEV:
-
- # Ubuntu releases are merely copied to this project for control purposes
- UBUNTU_BASE_IMAGE:
- PRIOR_UBUNTU_BASE_IMAGE:
-
- # Latest Fedora release
- FEDORA_IMAGE_URL: "https://dl.fedoraproject.org/pub/fedora/linux/releases/32/Cloud/x86_64/images/Fedora-Cloud-Base-32-1.6.x86_64.qcow2"
- FEDORA_CSUM_URL: "https://dl.fedoraproject.org/pub/fedora/linux/releases/32/Cloud/x86_64/images/Fedora-Cloud-32-1.6-x86_64-CHECKSUM"
- FEDORA_BASE_IMAGE_NAME: 'fedora-cloud-base-32-1-6'
-
- # Prior Fedora release
- PRIOR_FEDORA_IMAGE_URL: "https://dl.fedoraproject.org/pub/fedora/linux/releases/31/Cloud/x86_64/images/Fedora-Cloud-Base-31-1.9.x86_64.qcow2"
- PRIOR_FEDORA_CSUM_URL: "https://dl.fedoraproject.org/pub/fedora/linux/releases/31/Cloud/x86_64/images/Fedora-Cloud-31-1.9-x86_64-CHECKSUM"
- PRIOR_FEDORA_BASE_IMAGE_NAME: 'fedora-cloud-base-31-1-9'
-
- # The name of the image in GCE used for packer build libpod_images.yml
- IBI_BASE_NAME: 'image-builder-image'
- CIDATA_ISO: 'cidata.iso' # produced by Makefile
-
- # Path to json file (required, likely ~/.config/gcloud/legacy_credentials/*/adc.json)
- GOOGLE_APPLICATION_CREDENTIALS:
- # The complete project ID (required, not the short name)
- GCP_PROJECT_ID:
- # Pre-existing storage bucket w/ lifecycle-enabled
- XFERBUCKET: "packer-import" # pre-created, globally unique, lifecycle-enabled
-
-# Don't leak sensitive values in error messages / output
-sensitive-variables:
- - 'GOOGLE_APPLICATION_CREDENTIALS'
- - 'GCP_PROJECT_ID'
-
-# What images to produce in which cloud
-builders:
- - &nested_virt
- name: 'fedora'
- type: 'qemu'
- accelerator: "kvm"
- iso_url: '{{user `FEDORA_IMAGE_URL`}}'
- disk_image: true
- format: "raw"
- disk_size: 5120
- iso_checksum_url: '{{user `FEDORA_CSUM_URL`}}'
- iso_checksum_type: "sha256"
- output_directory: '/tmp/{{build_name}}'
- vm_name: "disk.raw" # actually qcow2, name required for post-processing
- boot_wait: '5s'
- shutdown_command: 'shutdown -h now'
- headless: true
- qemu_binary: "/usr/libexec/qemu-kvm"
- qemuargs: # List-of-list format required to override packer-generated args
- - - "-m"
- - "1024"
- - - "-cpu"
- - "host"
- - - "-device"
- - "virtio-rng-pci"
- - - "-chardev"
- - "tty,id=pts,path={{user `TTYDEV`}}"
- - - "-device"
- - "isa-serial,chardev=pts"
- - - "-cdrom"
- - "{{user `CIDATA_ISO`}}"
- - - "-netdev"
- - "user,id=net0,hostfwd=tcp::{{ .SSHHostPort }}-:22"
- - - "-device"
- - "virtio-net,netdev=net0"
- communicator: 'ssh'
- ssh_private_key_file: 'cidata.ssh'
- ssh_username: 'root'
-
- - <<: *nested_virt
- name: 'prior-fedora'
- iso_url: '{{user `PRIOR_FEDORA_IMAGE_URL`}}'
- iso_checksum_url: '{{user `PRIOR_FEDORA_CSUM_URL`}}'
-
- - &imgcopy
- name: 'ubuntu'
- type: 'googlecompute'
- image_name: '{{user `UBUNTU_BASE_IMAGE`}}'
- image_family: '{{build_name}}-base'
- source_image: '{{user `UBUNTU_BASE_IMAGE`}}'
- source_image_project_id: 'ubuntu-os-cloud'
- project_id: '{{user `GCP_PROJECT_ID`}}'
- account_file: '{{user `GOOGLE_APPLICATION_CREDENTIALS`}}'
- startup_script_file: "systemd_banish.sh"
- zone: 'us-central1-a'
- disk_size: 20
- communicator: 'none'
-
- - <<: *imgcopy
- name: 'prior-ubuntu'
- image_name: '{{user `PRIOR_UBUNTU_BASE_IMAGE`}}'
- source_image: '{{user `PRIOR_UBUNTU_BASE_IMAGE`}}'
-
-provisioners:
- - type: 'shell'
- only: ['fedora', 'prior-fedora']
- inline:
- - 'mkdir -p /tmp/libpod/{{user `SCRIPT_BASE`}}'
- - 'mkdir -p /tmp/libpod/{{user `PACKER_BASE`}}'
-
- - type: 'file'
- only: ['fedora', 'prior-fedora']
- source: '{{user `GOSRC`}}/.cirrus.yml'
- destination: '/tmp/libpod/.cirrus.yml'
-
- - type: 'file'
- only: ['fedora', 'prior-fedora']
- source: '{{user `GOSRC`}}/{{user `SCRIPT_BASE`}}/'
- destination: '/tmp/libpod/{{user `SCRIPT_BASE`}}/'
-
- - type: 'file'
- only: ['fedora', 'prior-fedora']
- source: '{{user `GOSRC`}}/{{user `PACKER_BASE`}}/'
- destination: '/tmp/libpod/{{user `PACKER_BASE`}}/'
-
- - &shell_script
- only: ['fedora', 'prior-fedora']
- type: 'shell'
- inline:
- - 'chmod +x /tmp/libpod/{{user `PACKER_BASE`}}/*.sh'
- - '/tmp/libpod/{{user `PACKER_BASE`}}/{{build_name}}_base-setup.sh'
- expect_disconnect: true # Allow this to reboot the VM if needed
- environment_vars:
- - 'TIMESTAMP={{user `TIMESTAMP`}}'
- - 'GOSRC=/tmp/libpod'
- - 'SCRIPT_BASE={{user `SCRIPT_BASE`}}'
- - 'PACKER_BASE={{user `PACKER_BASE`}}'
-
-post-processors:
- - - type: "compress"
- only: ['fedora', 'prior-fedora']
- output: '/tmp/{{build_name}}/disk.raw.tar.gz'
- format: '.tar.gz'
- compression_level: 9
- - &gcp_import
- only: ['fedora']
- type: "googlecompute-import"
- project_id: '{{user `GCP_PROJECT_ID`}}'
- account_file: '{{user `GOOGLE_APPLICATION_CREDENTIALS`}}'
- bucket: '{{user `XFERBUCKET`}}'
- gcs_object_name: '{{build_name}}-{{user `TIMESTAMP`}}.tar.gz'
- image_name: "{{user `FEDORA_BASE_IMAGE_NAME`}}-{{user `TIMESTAMP`}}"
- image_description: 'Based on {{user `FEDORA_IMAGE_URL`}}'
- image_family: '{{build_name}}-base'
- - <<: *gcp_import
- only: ['prior-fedora']
- image_name: "{{user `PRIOR_FEDORA_BASE_IMAGE_NAME`}}-{{user `TIMESTAMP`}}"
- image_description: 'Based on {{user `PRIOR_FEDORA_IMAGE_URL`}}'
- image_family: '{{build_name}}-base'
- - type: 'manifest'
diff --git a/contrib/cirrus/packer/libpod_images.yml b/contrib/cirrus/packer/libpod_images.yml
deleted file mode 100644
index 38f5a8250..000000000
--- a/contrib/cirrus/packer/libpod_images.yml
+++ /dev/null
@@ -1,86 +0,0 @@
----
-
-# All of these are required
-variables:
- BUILT_IMAGE_SUFFIX: '{{env `BUILT_IMAGE_SUFFIX`}}'
- GOPATH: '{{env `GOPATH`}}'
- GOSRC: '{{env `GOSRC`}}'
- PACKER_BASE: '{{env `PACKER_BASE`}}'
- SCRIPT_BASE: '{{env `SCRIPT_BASE`}}'
-
- # Base-image names are required. Using image family-names breaks parallelism
- UBUNTU_BASE_IMAGE: '{{env `UBUNTU_BASE_IMAGE`}}'
- PRIOR_UBUNTU_BASE_IMAGE: '{{env `PRIOR_UBUNTU_BASE_IMAGE`}}'
- FEDORA_BASE_IMAGE: '{{env `FEDORA_BASE_IMAGE`}}'
- PRIOR_FEDORA_BASE_IMAGE: '{{env `PRIOR_FEDORA_BASE_IMAGE`}}'
-
- # Protected credentials, decrypted by Cirrus at runtime
- GCE_SSH_USERNAME: '{{env `GCE_SSH_USERNAME`}}'
- GCP_PROJECT_ID: '{{env `GCP_PROJECT_ID`}}'
- SERVICE_ACCOUNT: '{{env `SERVICE_ACCOUNT`}}'
- GOOGLE_APPLICATION_CREDENTIALS: '{{env `GOOGLE_APPLICATION_CREDENTIALS`}}'
-
-# Don't leak sensitive values in error messages / output
-sensitive-variables:
- - 'GCE_SSH_USERNAME'
- - 'GCP_PROJECT_ID'
- - 'SERVICE_ACCOUNT'
-
-# What images to produce in which cloud
-builders:
- # v----- is a YAML anchor, allows referencing this object by name (below)
- - &gce_hosted_image
- name: 'ubuntu-20'
- type: 'googlecompute'
- image_name: '{{build_name}}{{user `BUILT_IMAGE_SUFFIX`}}'
- image_family: '{{build_name}}-cache'
- source_image: '{{user `UBUNTU_BASE_IMAGE`}}' # precedence over family
- source_image_family: 'ubuntu-base' # for ref. only
- disk_size: 20 # REQUIRED: Runtime allocation > this value
- project_id: '{{user `GCP_PROJECT_ID`}}'
- service_account_email: '{{user `SERVICE_ACCOUNT`}}'
- communicator: 'ssh'
- ssh_username: '{{user `GCE_SSH_USERNAME`}}'
- ssh_pty: 'true'
- # The only supported zone in Cirrus-CI, as of addition of this comment
- zone: 'us-central1-a'
-
- # v----- is a YAML alias, allows partial re-use of the anchor object
- - <<: *gce_hosted_image
- name: 'ubuntu-19'
- source_image: '{{user `PRIOR_UBUNTU_BASE_IMAGE`}}'
- source_image_family: 'prior-ubuntu-base'
-
- - <<: *gce_hosted_image
- name: 'fedora-32'
- source_image: '{{user `FEDORA_BASE_IMAGE`}}'
- source_image_family: 'fedora-base'
-
- - <<: *gce_hosted_image
- name: 'fedora-31'
- source_image: '{{user `PRIOR_FEDORA_BASE_IMAGE`}}'
- source_image_family: 'prior-fedora-base'
-
-# The brains of the operation, making actual modifications to the base-image.
-provisioners:
- - type: 'shell'
- inline:
- - 'set -ex'
- # The 'file' provisioner item (below) will create the final component
- - 'mkdir -vp $(dirname {{user `GOSRC`}})'
-
- - type: 'file'
- source: '{{user `GOSRC`}}'
- destination: '{{user `GOSRC`}}'
-
- - type: 'shell'
- script: '{{user `GOSRC`}}/{{user `PACKER_BASE`}}/{{split build_name "-" 0}}_setup.sh'
- environment_vars:
- - 'PACKER_BUILDER_NAME={{build_name}}'
- - 'GOPATH={{user `GOPATH`}}'
- - 'GOSRC={{user `GOSRC`}}'
- - 'PACKER_BASE={{user `PACKER_BASE`}}'
- - 'SCRIPT_BASE={{user `SCRIPT_BASE`}}'
-
-post-processors:
- - type: 'manifest' # writes packer-manifest.json
diff --git a/contrib/cirrus/packer/make-user-data.sh b/contrib/cirrus/packer/make-user-data.sh
deleted file mode 100644
index 676a50f5c..000000000
--- a/contrib/cirrus/packer/make-user-data.sh
+++ /dev/null
@@ -1,20 +0,0 @@
-#!/usr/bin/env bash
-
-# This script is utilized by the Makefile; it's not intended to be run by humans
-
-cat <<EOF > user-data
-#cloud-config
-timezone: US/Eastern
-growpart:
- mode: auto
-disable_root: false
-ssh_pwauth: True
-ssh_import_id: [root]
-ssh_authorized_keys:
- - $(cat cidata.ssh.pub)
-users:
- - name: root
- primary-group: root
- homedir: /root
- system: true
-EOF
diff --git a/contrib/cirrus/packer/prior-fedora_base-setup.sh b/contrib/cirrus/packer/prior-fedora_base-setup.sh
deleted file mode 100644
index bf29a1aec..000000000
--- a/contrib/cirrus/packer/prior-fedora_base-setup.sh
+++ /dev/null
@@ -1,44 +0,0 @@
-#!/usr/bin/env bash
-
-# N/B: This script is not intended to be run by humans. It is used to configure the
-# fedora base image for importing, so that it will boot in GCE
-
-set -e
-
-# Load in library (copied by packer, before this script was run)
-source $GOSRC/$SCRIPT_BASE/lib.sh
-
-echo "Updating packages"
-dnf -y update
-
-echo "Installing necessary packages and google services"
-dnf -y install rng-tools google-compute-engine-tools google-compute-engine-oslogin ethtool
-
-echo "Enabling services"
-systemctl enable rngd
-
-# There is a race that can happen on boot between the GCE services configuring
-# the VM, and cloud-init trying to do similar activities. Use a customized
-# unit file to make sure cloud-init starts after the google-compute-* services.
-echo "Setting cloud-init service to start after google-network-daemon.service"
-cp -v $GOSRC/$PACKER_BASE/cloud-init/fedora/cloud-init.service /etc/systemd/system/
-
-# ref: https://cloud.google.com/compute/docs/startupscript
-# The mechanism used by Cirrus-CI to execute tasks on the system is through an
-# "agent" process launched as a GCP startup-script (from the metadata service).
-# This agent is responsible for cloning the repository and executing all task
-# scripts and other operations. Therefore, on SELinux-enforcing systems, the
-# service must be labeled properly to ensure its child processes can
-# run with the proper contexts.
-METADATA_SERVICE_CTX=unconfined_u:unconfined_r:unconfined_t:s0
-METADATA_SERVICE_PATH=systemd/system/google-startup-scripts.service
-sed -r -e \
- "s/Type=oneshot/Type=oneshot\nSELinuxContext=$METADATA_SERVICE_CTX/" \
- /lib/$METADATA_SERVICE_PATH > /etc/$METADATA_SERVICE_PATH
-
-# Ensure there are no disruptive periodic services enabled by default in image
-systemd_banish
-
-rh_finalize
-
-echo "SUCCESS!"
diff --git a/contrib/cirrus/packer/systemd_banish.sh b/contrib/cirrus/packer/systemd_banish.sh
deleted file mode 100755
index 2219f2a4f..000000000
--- a/contrib/cirrus/packer/systemd_banish.sh
+++ /dev/null
@@ -1,28 +0,0 @@
-#!/usr/bin/env bash
-
-set +e # Not all of these exist on every platform
-
-# This is intended to be executed on VMs as a startup script on initial-boot.
-# Alternatively, it may be executed with the '--list' option to return the list
-# of systemd units defined for disablement (useful for testing).
-
-EVIL_UNITS="cron crond atd apt-daily-upgrade apt-daily fstrim motd-news systemd-tmpfiles-clean"
-
-if [[ "$1" == "--list" ]]
-then
- echo "$EVIL_UNITS"
- exit 0
-fi
-
-echo "Disabling periodic services that could destabilize testing:"
-for unit in $EVIL_UNITS
-do
- echo "Banishing $unit (ignoring errors)"
- (
- sudo systemctl stop $unit
- sudo systemctl disable $unit
- sudo systemctl disable $unit.timer
- sudo systemctl mask $unit
- sudo systemctl mask $unit.timer
- ) &> /dev/null
-done
diff --git a/contrib/cirrus/packer/ubuntu_packaging.sh b/contrib/cirrus/packer/ubuntu_packaging.sh
deleted file mode 100644
index c478028b5..000000000
--- a/contrib/cirrus/packer/ubuntu_packaging.sh
+++ /dev/null
@@ -1,175 +0,0 @@
-#!/usr/bin/env bash
-
-# This script is called from ubuntu_setup.sh and various Dockerfiles.
-# It's not intended to be used outside of those contexts. It assumes the lib.sh
-# library has already been sourced, and that all "ground-up" package-related activity
-# needs to be done, including repository setup and initial update.
-
-set -e
-
-echo "Updating/Installing repos and packages for $OS_REL_VER"
-
-source $GOSRC/$SCRIPT_BASE/lib.sh
-
-req_env_var GOSRC SCRIPT_BASE BIGTO SUDOAPTGET INSTALL_AUTOMATION_VERSION
-
-echo "Updating/configuring package repositories."
-$BIGTO $SUDOAPTGET update
-
-echo "Installing deps to add third-party repositories and automation tooling"
-$LILTO $SUDOAPTGET install software-properties-common git curl
-
-# Install common automation tooling (i.e. ooe.sh)
-curl --silent --show-error --location \
- --url "https://raw.githubusercontent.com/containers/automation/master/bin/install_automation.sh" | \
- $SUDO env INSTALL_PREFIX=/usr/share /bin/bash -s - "$INSTALL_AUTOMATION_VERSION"
-# Reload installed environment right now (happens automatically in a new process)
-source /usr/share/automation/environment
-
-$LILTO ooe.sh $SUDOAPTADD ppa:criu/ppa
-
-echo "Configuring/Instaling deps from Open build server"
-VERSION_ID=$(source /etc/os-release; echo $VERSION_ID)
-echo "deb http://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/xUbuntu_$VERSION_ID/ /" \
- | ooe.sh $SUDO tee /etc/apt/sources.list.d/devel:kubic:libcontainers:stable.list
-ooe.sh curl -L -o /tmp/Release.key "https://download.opensuse.org/repositories/devel:kubic:libcontainers:stable/xUbuntu_${VERSION_ID}/Release.key"
-ooe.sh $SUDO apt-key add - < /tmp/Release.key
-
-INSTALL_PACKAGES=(\
- apache2-utils
- apparmor
- aufs-tools
- autoconf
- automake
- bash-completion
- bats
- bison
- btrfs-progs
- build-essential
- buildah
- bzip2
- conmon
- containernetworking-plugins
- containers-common
- coreutils
- cri-o-runc
- criu
- curl
- dnsmasq
- e2fslibs-dev
- emacs-nox
- file
- fuse3
- gawk
- gcc
- gettext
- git
- go-md2man
- golang-1.14
- iproute2
- iptables
- jq
- libaio-dev
- libapparmor-dev
- libbtrfs-dev
- libcap-dev
- libdevmapper-dev
- libdevmapper1.02.1
- libfuse-dev
- libfuse2
- libfuse3-dev
- libglib2.0-dev
- libgpgme11-dev
- liblzma-dev
- libnet1
- libnet1-dev
- libnl-3-dev
- libprotobuf-c-dev
- libprotobuf-dev
- libseccomp-dev
- libseccomp2
- libselinux-dev
- libsystemd-dev
- libtool
- libudev-dev
- libvarlink
- lsof
- make
- netcat
- openssl
- pkg-config
- podman
- protobuf-c-compiler
- protobuf-compiler
- python-dateutil
- python-protobuf
- python2
- python3-dateutil
- python3-pip
- python3-psutil
- python3-pytoml
- python3-requests
- python3-setuptools
- rsync
- runc
- scons
- skopeo
- slirp4netns
- socat
- sudo
- unzip
- vim
- wget
- xz-utils
- zip
- zlib1g-dev
-)
-DOWNLOAD_PACKAGES=(\
- cri-o-$(get_kubernetes_version)
- cri-tools
- parallel
-)
-
-# These aren't resolvable on Ubuntu 20
-if [[ "$OS_RELEASE_VER" -le 19 ]]; then
- INSTALL_PACKAGES+=(\
- python-future
- python-minimal
- yum-utils
- )
-else
- INSTALL_PACKAGES+=(\
- python-is-python3
- )
-fi
-
-# Do this at the last possible moment to avoid dpkg lock conflicts
-echo "Upgrading all packages"
-$BIGTO ooe.sh $SUDOAPTGET upgrade
-
-echo "Installing general testing and system dependencies"
-# Necessary to update cache of newly added repos
-$LILTO ooe.sh $SUDOAPTGET update
-$BIGTO ooe.sh $SUDOAPTGET install "${INSTALL_PACKAGES[@]}"
-
-if [[ ${#DOWNLOAD_PACKAGES[@]} -gt 0 ]]; then
- echo "Downloading packages for optional installation at runtime, as needed."
- $SUDO ln -s /var/cache/apt/archives "$PACKAGE_DOWNLOAD_DIR"
- $LILTO ooe.sh $SUDOAPTGET install --download-only "${DOWNLOAD_PACKAGES[@]}"
-fi
-
-echo "Configuring Go environment"
-# There are multiple (otherwise conflicting) versions of golang available
-# on Ubuntu. Being primarily localized by env. vars and defaults, dropping
-# a symlink is the appropriate way to "install" a specific version system-wide.
-$SUDO ln -sf /usr/lib/go-1.14/bin/go /usr/bin/go
-# Initially go was not installed
-cd $GOSRC
-source $SCRIPT_BASE/lib.sh
-echo "Go environment has been setup:"
-go env
-
-echo "Building/Installing runtime tooling"
-$SUDO hack/install_catatonit.sh
-$SUDO make install.libseccomp.sudo
-$SUDO make install.tools GO_BUILD='go build' # -mod=vendor breaks this
diff --git a/contrib/cirrus/packer/ubuntu_setup.sh b/contrib/cirrus/packer/ubuntu_setup.sh
deleted file mode 100644
index d650e6c76..000000000
--- a/contrib/cirrus/packer/ubuntu_setup.sh
+++ /dev/null
@@ -1,35 +0,0 @@
-#!/usr/bin/env bash
-
-# This script is called by packer on the subject Ubuntu VM, to setup the podman
-# build/test environment. It's not intended to be used outside of this context.
-
-set -e
-
-# Load in library (copied by packer, before this script was run)
-source $GOSRC/$SCRIPT_BASE/lib.sh
-
-req_env_var SCRIPT_BASE PACKER_BASE INSTALL_AUTOMATION_VERSION PACKER_BUILDER_NAME GOSRC UBUNTU_BASE_IMAGE OS_RELEASE_ID OS_RELEASE_VER
-
-# Ensure there are no disruptive periodic services enabled by default in image
-systemd_banish
-
-# Stop disruption upon boot ASAP after booting
-echo "Disabling all packaging activity on boot"
-for filename in $(sudo ls -1 /etc/apt/apt.conf.d); do \
- echo "Checking/Patching $filename"
- sudo sed -i -r -e "s/$PERIODIC_APT_RE/"'\10"\;/' "/etc/apt/apt.conf.d/$filename"; done
-
-bash $PACKER_BASE/ubuntu_packaging.sh
-
-# Load installed environment right now (happens automatically in a new process)
-source /usr/share/automation/environment
-
-echo "Making Ubuntu kernel to enable cgroup swap accounting as it is not the default."
-SEDCMD='s/^GRUB_CMDLINE_LINUX="(.*)"/GRUB_CMDLINE_LINUX="\1 cgroup_enable=memory swapaccount=1"/g'
-ooe.sh sudo sed -re "$SEDCMD" -i /etc/default/grub.d/*
-ooe.sh sudo sed -re "$SEDCMD" -i /etc/default/grub
-ooe.sh sudo update-grub
-
-ubuntu_finalize
-
-echo "SUCCESS!"
diff --git a/contrib/cirrus/packer/xfedora_setup.sh b/contrib/cirrus/packer/xfedora_setup.sh
deleted file mode 100644
index 16ae87d8a..000000000
--- a/contrib/cirrus/packer/xfedora_setup.sh
+++ /dev/null
@@ -1,34 +0,0 @@
-#!/usr/bin/env bash
-
-# This script is called by packer on the subject fedora VM, to setup the podman
-# build/test environment. It's not intended to be used outside of this context.
-
-set -e
-
-# Load in library (copied by packer, before this script was run)
-source $GOSRC/$SCRIPT_BASE/lib.sh
-
-req_env_var SCRIPT_BASE PACKER_BASE INSTALL_AUTOMATION_VERSION PACKER_BUILDER_NAME GOSRC FEDORA_BASE_IMAGE OS_RELEASE_ID OS_RELEASE_VER
-
-workaround_bfq_bug
-
-# Do not enable updates-testing on the previous Fedora release
-if [[ "$PRIOR_FEDORA_BASE_IMAGE" =~ "${OS_RELEASE_ID}-cloud-base-${OS_RELEASE_VER}" ]]; then
- DISABLE_UPDATES_TESTING=1
-else
- DISABLE_UPDATES_TESTING=0
-fi
-
-bash $PACKER_BASE/fedora_packaging.sh
-# Load installed environment right now (happens automatically in a new process)
-source /usr/share/automation/environment
-
-echo "Enabling cgroup management from containers"
-ooe.sh sudo setsebool container_manage_cgroup true
-
-# Ensure there are no disruptive periodic services enabled by default in image
-systemd_banish
-
-rh_finalize
-
-echo "SUCCESS!"
diff --git a/contrib/cirrus/podbot.py b/contrib/cirrus/podbot.py
deleted file mode 100755
index 9ca4915a7..000000000
--- a/contrib/cirrus/podbot.py
+++ /dev/null
@@ -1,105 +0,0 @@
-#!/usr/bin/env python3
-
-# Simple and dumb script to send a message to the #podman IRC channel on frenode
-# Based on example from: https://pythonspot.com/building-an-irc-bot/
-
-import os
-import time
-import random
-import errno
-import socket
-import sys
-
-class IRC:
-
- response_timeout = 30 # seconds
- irc = socket.socket()
-
- def __init__(self, server, nickname, channel):
- self.server = server
- self.nickname = nickname
- self.channel = channel
- self.irc = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
-
- def _send(self, cmdstr):
- self.irc.send(bytes(cmdstr + '\r\n', 'utf-8'))
-
- def message(self, msg):
- data = 'PRIVMSG {0} :{1}\r\n'.format(self.channel, msg)
- print(data)
- self._send(data)
-
- @staticmethod
- def fix_newlines(bufr):
- return bufr.replace('\\r\\n', '\n')
-
- def _required_response(self, needle, haystack):
- start = time.time()
- end = start + self.response_timeout
- while time.time() < end:
- if haystack.find(needle) != -1:
- return (False, haystack)
- time.sleep(0.1)
- try:
- haystack += str(self.irc.recv(4096, socket.MSG_DONTWAIT))
- except socket.error as serr:
- if serr.errno == errno.EWOULDBLOCK:
- continue
- raise # can't handle this
- return (True, haystack) # Error
-
- def connect(self, username, password):
- # This is ugly as sin, but seems to be a working send/expect sequence
-
- print("connecting to: {0}".format(self.server))
- self.irc.connect((self.server, 6667)) #connects to the server
- self._send("USER {0} {0} {0} :I am {0}".format(self.nickname))
- self._send("NICK {0}".format(self.nickname))
-
- err, haystack = self._required_response('End of /MOTD command.'
- ''.format(self.nickname), "")
- if err:
- print(self.fix_newlines(haystack))
- print("Error connecting to {0}".format(self.server))
- return True
-
- print("Logging in as {0}".format(username))
- self._send("PRIVMSG NickServ :IDENTIFY {0} {1}".format(username, password))
- err, _ = self._required_response("You are now identified for", "")
- if err:
- print("Error logging in to {0} as {1}".format(self.server, username))
- return True
-
- print("Joining {0}".format(self.channel))
- self._send("JOIN {0}".format(self.channel))
- err, haystack = self._required_response("{0} {1} :End of /NAMES list."
- "".format(self.nickname, self.channel),
- haystack)
- print(self.fix_newlines(haystack))
- if err:
- print("Error joining {0}".format(self.channel))
- return True
- return False
-
- def quit(self):
- print("Quitting")
- self._send("QUIT :my work is done here")
- self.irc.close()
-
-
-if len(sys.argv) < 3:
- print("Error: Must pass desired nick and message as parameters")
-else:
- for try_again in (True,False):
- irc = IRC("irc.freenode.net", sys.argv[1], "#podman")
- err = irc.connect(*os.environ.get('IRCID', 'Big Bug').split(" ", 2))
- if err and try_again:
- print("Trying again in 5 seconds...")
- time.sleep(5)
- continue
- elif err:
- break
- irc.message(" ".join(sys.argv[2:]))
- time.sleep(5.0) # avoid join/quit spam
- irc.quit()
- break
diff --git a/contrib/cirrus/setup_environment.sh b/contrib/cirrus/setup_environment.sh
index e22f92a5b..b406d7b5c 100755
--- a/contrib/cirrus/setup_environment.sh
+++ b/contrib/cirrus/setup_environment.sh
@@ -33,9 +33,6 @@ done
# Sometimes environment setup needs to vary between distros
# Note: This should only be used for environment variables, and temporary workarounds.
-# Anything externally dependent, should be made fixed-in-time by adding to
-# contrib/cirrus/packer/*_setup.sh to be incorporated into VM cache-images
-# (see docs).
cd "${GOSRC}/"
case "${OS_RELEASE_ID}" in
ubuntu)
@@ -44,8 +41,6 @@ case "${OS_RELEASE_ID}" in
# All SELinux distros need this for systemd-in-a-container
setsebool container_manage_cgroup true
- workaround_bfq_bug
-
if [[ "$ADD_SECOND_PARTITION" == "true" ]]; then
bash "$SCRIPT_BASE/add_second_partition.sh"
fi
@@ -67,14 +62,14 @@ source "$SCRIPT_BASE/lib.sh"
case "$CG_FS_TYPE" in
tmpfs)
warn "Forcing testing with runc instead of crun"
- # On ubuntu, the default runc is usually not new enough.
- if ${OS_RELEASE_ID} == "ubuntu"; then
- X=$(echo "export OCI_RUNTIME=/usr/lib/cri-o-runc/sbin/runc" | \
- tee -a /etc/environment) && eval "$X" && echo "$X"
- else
- X=$(echo "export OCI_RUNTIME=/usr/bin/runc" | \
- tee -a /etc/environment) && eval "$X" && echo "$X"
- fi
+ # On ubuntu, the default runc is usually not new enough.
+ if [[ "$OS_RELEASE_ID" == "ubuntu" ]]; then
+ X=$(echo "export OCI_RUNTIME=/usr/lib/cri-o-runc/sbin/runc" | \
+ tee -a /etc/environment) && eval "$X" && echo "$X"
+ else
+ X=$(echo "export OCI_RUNTIME=/usr/bin/runc" | \
+ tee -a /etc/environment) && eval "$X" && echo "$X"
+ fi
;;
cgroup2fs)
# This is necessary since we've built/installed from source, which uses runc as the default.
diff --git a/contrib/cirrus/success.sh b/contrib/cirrus/success.sh
deleted file mode 100755
index 8783f6b81..000000000
--- a/contrib/cirrus/success.sh
+++ /dev/null
@@ -1,66 +0,0 @@
-#!/usr/bin/env bash
-
-set -e
-
-source $(dirname $0)/lib.sh
-
-req_env_var CIRRUS_BRANCH CIRRUS_REPO_FULL_NAME CIRRUS_BASE_SHA CIRRUS_CHANGE_IN_REPO CIRRUS_CHANGE_MESSAGE
-
-cd $CIRRUS_WORKING_DIR
-
-if [[ "$CIRRUS_BRANCH" =~ "pull" ]]
-then
- echo "Retrieving latest HEADS and tags"
- git fetch --all --tags
- echo "Finding commit authors for PR $CIRRUS_PR"
- unset NICKS
- if [[ -r "$AUTHOR_NICKS_FILEPATH" ]]
- then
- SHARANGE="${CIRRUS_BASE_SHA}..${CIRRUS_CHANGE_IN_REPO}"
- EXCLUDE_RE='merge-robot'
- EMAILCSET='[:alnum:]-+_@.'
- AUTHOR_NICKS=$(egrep -v '(^[[:space:]]*$)|(^[[:space:]]*#)' "$AUTHOR_NICKS_FILEPATH" | sort -u)
- # Depending on branch-state, it's possible SHARANGE could be _WAY_ too big
- MAX_NICKS=10
- # newline separated
- GITLOG="git log --format='%ae'"
- COMMIT_AUTHORS=$($GITLOG $SHARANGE || $GITLOG -1 HEAD | \
- tr --delete --complement "$EMAILCSET[:space:]" | \
- egrep -v "$EXCLUDE_RE" | \
- sort -u | \
- tail -$MAX_NICKS)
-
- for c_email in $COMMIT_AUTHORS
- do
- c_email=$(echo "$c_email" | tr --delete --complement "$EMAILCSET")
- echo -e "\tExamining $c_email"
- NICK=$(echo "$AUTHOR_NICKS" | grep -m 1 "$c_email" | \
- awk --field-separator ',' '{print $2}' | tr -d '[[:blank:]]')
- if [[ -n "$NICK" ]]
- then
- echo -e "\t\tFound $c_email -> $NICK in $(basename $AUTHOR_NICKS_FILEPATH)"
- else
- echo -e "\t\tNot found in $(basename $AUTHOR_NICKS_FILEPATH), using e-mail username."
- NICK=$(echo "$c_email" | cut -d '@' -f 1)
- fi
- if ! echo "$NICKS" | grep -q "$NICK"
- then
- echo -e "\tUsing nick $NICK"
- NICKS="${NICKS:+$NICKS, }$NICK"
- else
- echo -e "\tNot re-adding duplicate nick $NICK"
- fi
- done
- fi
-
- unset MENTION_PREFIX
- [[ -z "$NICKS" ]] || \
- MENTION_PREFIX="$NICKS: "
-
- URL="https://github.com/$CIRRUS_REPO_FULL_NAME/pull/$CIRRUS_PR"
- PR_SUBJECT=$(echo "$CIRRUS_CHANGE_MESSAGE" | head -1)
- ircmsg "${MENTION_PREFIX}Cirrus-CI testing successful for PR '$PR_SUBJECT': $URL"
-else
- URL="https://cirrus-ci.com/github/containers/libpod/$CIRRUS_BRANCH"
- ircmsg "Cirrus-CI testing branch $(basename $CIRRUS_BRANCH) successful: $URL"
-fi
diff --git a/contrib/podmanimage/README.md b/contrib/podmanimage/README.md
index d6abb8ae6..7641f6c7e 100644
--- a/contrib/podmanimage/README.md
+++ b/contrib/podmanimage/README.md
@@ -49,3 +49,8 @@ podman images
exit
```
+
+**Note:** If you encounter a `fuse: device not found` error when running the container image, it is likely that
+the fuse kernel module has not been loaded on your host system. Use the command `modprobe fuse` to load the
+module and then run the container image. To enable this automatically at boot time, you can add a configuration
+file to `/etc/modules-load.d`. See `man modules-load.d` for more details.
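+
+For example, a one-line file under `/etc/modules-load.d` (the file name is arbitrary; the format is one module name per line) will load the module at boot:
+```
+# /etc/modules-load.d/fuse.conf
+fuse
+```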
diff --git a/contrib/rootless-cni-infra/Containerfile b/contrib/rootless-cni-infra/Containerfile
new file mode 100644
index 000000000..c5d812a6e
--- /dev/null
+++ b/contrib/rootless-cni-infra/Containerfile
@@ -0,0 +1,35 @@
+ARG GOLANG_VERSION=1.15
+ARG ALPINE_VERSION=3.12
+ARG CNI_VERSION=v0.8.0
+ARG CNI_PLUGINS_VERSION=v0.8.7
+# Aug 20, 2020
+ARG DNSNAME_VESION=78b4da7bbfc51c27366da630e1df1c4f2e8b1b5b
+
+FROM golang:${GOLANG_VERSION}-alpine${ALPINE_VERSION} AS golang-base
+RUN apk add --no-cache git
+
+FROM golang-base AS cnitool
+RUN git clone https://github.com/containernetworking/cni /go/src/github.com/containernetworking/cni
+WORKDIR /go/src/github.com/containernetworking/cni
+ARG CNI_VERSION
+RUN git checkout ${CNI_VERSION}
+RUN go build -o /cnitool ./cnitool
+
+FROM golang-base AS dnsname
+RUN git clone https://github.com/containers/dnsname /go/src/github.com/containers/dnsname
+WORKDIR /go/src/github.com/containers/dnsname
+ARG DNSNAME_VERSION
+RUN git checkout ${DNSNAME_VERSION}
+RUN go build -o /dnsname ./plugins/meta/dnsname
+
+FROM alpine:${ALPINE_VERSION}
+RUN apk add --no-cache curl dnsmasq iptables ip6tables iproute2
+ARG TARGETARCH
+ARG CNI_PLUGINS_VERSION
+RUN mkdir -p /opt/cni/bin && \
+ curl -fsSL https://github.com/containernetworking/plugins/releases/download/${CNI_PLUGINS_VERSION}/cni-plugins-linux-${TARGETARCH}-${CNI_PLUGINS_VERSION}.tgz | tar xz -C /opt/cni/bin
+COPY --from=cnitool /cnitool /usr/local/bin
+COPY --from=dnsname /dnsname /opt/cni/bin
+COPY rootless-cni-infra /usr/local/bin
+ENV CNI_PATH=/opt/cni/bin
+CMD ["sleep", "infinity"]
diff --git a/contrib/rootless-cni-infra/README.md b/contrib/rootless-cni-infra/README.md
new file mode 100644
index 000000000..937e057fb
--- /dev/null
+++ b/contrib/rootless-cni-infra/README.md
@@ -0,0 +1,22 @@
+# rootless-cni-infra
+
+Infra container for CNI-in-slirp4netns.
+
+## How it works
+
+When a CNI network is specified for `podman run` in rootless mode, Podman launches the `rootless-cni-infra` container to execute CNI plugins inside slirp4netns.
+
+The infra container is created per user, by executing an equivalent of:
+`podman run -d --name rootless-cni-infra --pid=host --privileged -v $HOME/.config/cni/net.d:/etc/cni/net.d rootless-cni-infra`.
+The infra container is automatically deleted when no CNI network is in use.
+
+Podman then allocates a CNI netns in the infra container, by executing an equivalent of:
+`podman exec rootless-cni-infra rootless-cni-infra alloc $CONTAINER_ID $NETWORK_NAME $POD_NAME`.
+
+The allocated netns is deallocated when the container is being removed, by executing an equivalent of:
+`podman exec rootless-cni-infra rootless-cni-infra dealloc $CONTAINER_ID $NETWORK_NAME`.
+
+## Directory layout
+
+* `/run/rootless-cni-infra/${CONTAINER_ID}/pid`: PID of the `sleep infinity` process that corresponds to the allocated netns
+* `/run/rootless-cni-infra/${CONTAINER_ID}/attached/${NETWORK_NAME}`: CNI result
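+
+The stored CNI result can also be queried through the script itself, by executing an equivalent of:
+`podman exec rootless-cni-infra rootless-cni-infra print-cni-result $CONTAINER_ID $NETWORK_NAME`.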
diff --git a/contrib/rootless-cni-infra/rootless-cni-infra b/contrib/rootless-cni-infra/rootless-cni-infra
new file mode 100755
index 000000000..5a574d2eb
--- /dev/null
+++ b/contrib/rootless-cni-infra/rootless-cni-infra
@@ -0,0 +1,147 @@
+#!/bin/sh
+set -eu
+
+ARG0="$0"
+VERSION="0.1.0"
+BASE="/run/rootless-cni-infra"
+
+# CLI subcommand: "alloc $CONTAINER_ID $NETWORK_NAME $POD_NAME"
+cmd_entrypoint_alloc() {
+ if [ "$#" -ne 3 ]; then
+ echo >&2 "Usage: $ARG0 alloc CONTAINER_ID NETWORK_NAME POD_NAME"
+ exit 1
+ fi
+
+ ID="$1"
+ NET="$2"
+ K8S_POD_NAME="$3"
+
+ dir="${BASE}/${ID}"
+ mkdir -p "${dir}/attached"
+
+ pid=""
+ if [ -f "${dir}/pid" ]; then
+ pid=$(cat "${dir}/pid")
+ else
+ unshare -n sleep infinity &
+ pid="$!"
+ echo "${pid}" >"${dir}/pid"
+ nsenter -t "${pid}" -n ip link set lo up
+ fi
+ CNI_ARGS="IgnoreUnknown=1;K8S_POD_NAME=${K8S_POD_NAME}"
+ nwcount=$(find "${dir}/attached" -type f | wc -l)
+ CNI_IFNAME="eth${nwcount}"
+ export CNI_ARGS CNI_IFNAME
+ cnitool add "${NET}" "/proc/${pid}/ns/net" >"${dir}/attached/${NET}"
+
+ # return the result
+ ns="/proc/${pid}/ns/net"
+ echo "{\"ns\":\"${ns}\"}"
+}
+
+# CLI subcommand: "dealloc $CONTAINER_ID $NETWORK_NAME"
+cmd_entrypoint_dealloc() {
+ if [ "$#" -ne 2 ]; then
+ echo >&2 "Usage: $ARG0 dealloc CONTAINER_ID NETWORK_NAME"
+ exit 1
+ fi
+
+ ID=$1
+ NET=$2
+
+ dir="${BASE}/${ID}"
+ if [ ! -f "${dir}/pid" ]; then
+ exit 0
+ fi
+ pid=$(cat "${dir}/pid")
+ cnitool del "${NET}" "/proc/${pid}/ns/net"
+ rm -f "${dir}/attached/${NET}"
+
+ nwcount=$(find "${dir}/attached" -type f | wc -l)
+ if [ "${nwcount}" = 0 ]; then
+ kill -9 "${pid}"
+ rm -rf "${dir}"
+ fi
+
+ # return empty json
+ echo "{}"
+}
+
+# CLI subcommand: "is-idle"
+cmd_entrypoint_is_idle() {
+ if [ ! -d ${BASE} ]; then
+ echo '{"idle": true}'
+ elif [ -z "$(ls -1 ${BASE})" ]; then
+ echo '{"idle": true}'
+ else
+ echo '{"idle": false}'
+ fi
+}
+
+# CLI subcommand: "print-cni-result $CONTAINER_ID $NETWORK_NAME"
+cmd_entrypoint_print_cni_result() {
+ if [ "$#" -ne 2 ]; then
+ echo >&2 "Usage: $ARG0 print-cni-result CONTAINER_ID NETWORK_NAME"
+ exit 1
+ fi
+
+ ID=$1
+ NET=$2
+
+ # the result shall be CNI JSON
+ cat "${BASE}/${ID}/attached/${NET}"
+}
+
+# CLI subcommand: "print-netns-path $CONTAINER_ID"
+cmd_entrypoint_print_netns_path() {
+ if [ "$#" -ne 1 ]; then
+ echo >&2 "Usage: $ARG0 print-netns-path CONTAINER_ID"
+ exit 1
+ fi
+
+ ID=$1
+
+ pid=$(cat "${BASE}/${ID}/pid")
+ path="/proc/${pid}/ns/net"
+
+ # return the result
+ echo "{\"path\":\"${path}\"}"
+}
+
+# CLI subcommand: "help"
+cmd_entrypoint_help() {
+ echo "Usage: ${ARG0} COMMAND"
+ echo
+ echo "Rootless CNI Infra container"
+ echo
+ echo "Commands:"
+ echo " alloc Allocate a netns"
+ echo " dealloc Deallocate a netns"
+ echo " is-idle Print whether the infra container is idle"
+ echo " print-cni-result Print CNI result"
+ echo " print-netns-path Print netns path"
+ echo " help Print help"
+ echo " version Print version"
+}
+
+# CLI subcommand: "version"
+cmd_entrypoint_version() {
+ echo "{\"version\": \"${VERSION}\"}"
+}
+
+# parse args
+command="${1:-}"
+if [ -z "$command" ]; then
+ echo >&2 "No command was specified. Run \`${ARG0} help\` to see the usage."
+ exit 1
+fi
+
+command_func=$(echo "cmd_entrypoint_${command}" | sed -e "s/-/_/g")
+if ! command -v "${command_func}" >/dev/null 2>&1; then
+ echo >&2 "Unknown command: ${command}. Run \`${ARG0} help\` to see the usage."
+ exit 1
+fi
+
+# start the command func
+shift
+"${command_func}" "$@"
diff --git a/docs/source/Tutorials.rst b/docs/source/Tutorials.rst
index 33e4ae3d3..83818e3ae 100644
--- a/docs/source/Tutorials.rst
+++ b/docs/source/Tutorials.rst
@@ -6,7 +6,7 @@ Here are a number of useful tutorials to get you up and running with Podman. If
* `Basic Setup and Use of Podman <https://github.com/containers/podman/blob/master/docs/tutorials/podman_tutorial.md>`_: Learn how to setup Podman and perform some basic commands with the utility.
* `Basic Setup and Use of Podman in a Rootless environment <https://github.com/containers/podman/blob/master/docs/tutorials/rootless_tutorial.md>`_: The steps required to setup rootless Podman are enumerated.
-* `Podman Mac Client tutorial <https://github.com/containers/podman/blob/master/docs/tutorials/mac_client.md>`_: Special setup for running the Podman remote client on a Mac and connecting to Podman running on a Linux VM are documented.
+* `Podman Mac/Windows tutorial <https://github.com/containers/podman/blob/master/docs/tutorials/mac_win_client.md>`_: Special setup for running the Podman remote client on a Mac or Windows PC and connecting to Podman running on a Linux VM are documented.
* `How to sign and distribute container images using Podman <https://github.com/containers/podman/blob/master/docs/tutorials/image_signing.md>`_: Learn how to setup and use image signing with Podman.
* `Podman remote-client tutorial <https://github.com/containers/podman/blob/master/docs/tutorials/remote_client.md>`_: A brief how-to on using the Podman remote-client.
* `How to use libpod for custom/derivative projects <https://github.com/containers/podman/blob/master/docs/tutorials/podman-derivative-api.md>`_: How the libpod API can be used within your own project.
diff --git a/docs/source/markdown/podman-build.1.md b/docs/source/markdown/podman-build.1.md
index 6618df1b9..c38424a11 100644
--- a/docs/source/markdown/podman-build.1.md
+++ b/docs/source/markdown/podman-build.1.md
@@ -23,6 +23,8 @@ When the URL is an Containerfile, the Containerfile is downloaded to a temporary
When a Git repository is set as the URL, the repository is cloned locally and then set as the context.
+NOTE: `podman build` uses code sourced from the `buildah` project to build container images. This `buildah` code creates `buildah` containers for the `RUN` options in container storage. In certain situations, when the `podman build` process crashes or is killed, these external containers can be left in container storage. Use the `podman ps --all --storage` command to see these containers. External containers can be removed with the `podman rm --storage` command.
+
## OPTIONS
**--add-host**=*host*
@@ -804,7 +806,7 @@ If you are using a useradd command within a Containerfile with a large UID/GID,
If you are using `useradd` within your build script, you should pass the `--no-log-init or -l` option to the `useradd` command. This option tells useradd to stop creating the lastlog file.
## SEE ALSO
-podman(1), buildah(1), containers-registries.conf(5), crun(8), runc(8), useradd(8)
+podman(1), buildah(1), containers-registries.conf(5), crun(8), runc(8), useradd(8), podman-ps(1), podman-rm(1)
## HISTORY
Aug 2020, Additional options and .dockerignore added by Dan Walsh <dwalsh@redhat.com>
diff --git a/docs/source/markdown/podman-generate-systemd.1.md b/docs/source/markdown/podman-generate-systemd.1.md
index d0b1b3588..2ee290f0f 100644
--- a/docs/source/markdown/podman-generate-systemd.1.md
+++ b/docs/source/markdown/podman-generate-systemd.1.md
@@ -10,7 +10,7 @@ podman\-generate\-systemd - Generate systemd unit file(s) for a container or pod
**podman generate systemd** will create a systemd unit file that can be used to control a container or pod.
By default, the command will print the content of the unit files to stdout.
-Note that this command is not supported for the remote client.
+_Note: If you use this command with the remote client, you would still have to place the generated units on the remote system._
## OPTIONS:
@@ -20,6 +20,10 @@ Generate files instead of printing to stdout. The generated files are named {co
Note: On a system with SELinux enabled, the generated files will inherit contexts from the current working directory. Depending on the SELinux setup, changes to the generated files using `restorecon`, `chcon`, or `semanage` may be required to allow systemd to access these files. Alternatively, use the `-Z` option when running `mv` or `cp`.
+**--format**=*format*
+
+Print the created units in the specified format (`json`). If `--files` is specified, the paths to the created files will be printed instead of the unit content.
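+
+A minimal example (assuming an existing container named `mycontainer`):
+```
+$ podman generate systemd --format json mycontainer
+```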
+
**--name**, **-n**
Use the name of the container for the start, stop, and description in the unit file
diff --git a/docs/source/markdown/podman-login.1.md b/docs/source/markdown/podman-login.1.md
index 79c7ff640..efc7f05e2 100644
--- a/docs/source/markdown/podman-login.1.md
+++ b/docs/source/markdown/podman-login.1.md
@@ -12,9 +12,13 @@ and password. If the registry is not specified, the first registry under [regist
from registries.conf will be used. **podman login** reads in the username and password from STDIN.
The username and password can also be set using the **username** and **password** flags.
The path of the authentication file can be specified by the user by setting the **authfile**
-flag. The default path used is **${XDG\_RUNTIME\_DIR}/containers/auth.json**. If there is a valid
-username and password in the **authfile** , Podman will use those existing credentials if the user does not pass in a username.
-If those credentials are not present, Podman will then use any existing credentials found in **$HOME/.docker/config.json**.
+flag. The default path for reading and writing credentials is **${XDG\_RUNTIME\_DIR}/containers/auth.json**.
+Podman will use existing credentials if the user does not pass in a username.
+Podman will first search for the username and password in the **${XDG\_RUNTIME\_DIR}/containers/auth.json**. If they are not valid,
+Podman will then use any existing credentials found in **$HOME/.docker/config.json**.
+If those credentials are not present, Podman will create **${XDG\_RUNTIME\_DIR}/containers/auth.json** (if the file does not exist) and
+will then store the username and password from STDIN as a base64 encoded string in it.
+For more details about the format and configuration of the auth.json file, please refer to containers-auth.json(5).
**podman [GLOBAL OPTIONS]**
@@ -104,7 +108,7 @@ Login Succeeded!
```
## SEE ALSO
-podman(1), podman-logout(1)
+podman(1), podman-logout(1), containers-auth.json(5)
## HISTORY
August 2017, Originally compiled by Urvashi Mohnani <umohnani@redhat.com>
diff --git a/docs/source/markdown/podman-logout.1.md b/docs/source/markdown/podman-logout.1.md
index 8b9f75760..0ff954d43 100644
--- a/docs/source/markdown/podman-logout.1.md
+++ b/docs/source/markdown/podman-logout.1.md
@@ -10,7 +10,7 @@ podman\-logout - Logout of a container registry
**podman logout** logs out of a specified registry server by deleting the cached credentials
stored in the **auth.json** file. If the registry is not specified, the first registry under [registries.search]
from registries.conf will be used. The path of the authentication file can be overridden by the user by setting the **authfile** flag.
-The default path used is **${XDG\_RUNTIME\_DIR}/containers/auth.json**.
-The default path used is **${XDG\_RUNTIME\_DIR}/containers/auth.json**. For more details about the format and configuration of the auth.json file, please refer to containers-auth.json(5).
All the cached credentials can be removed by setting the **all** flag.
**podman [GLOBAL OPTIONS]**
@@ -54,7 +54,7 @@ Remove login credentials for all registries
```
## SEE ALSO
-podman(1), podman-login(1)
+podman(1), podman-login(1), containers-auth.json(5)
## HISTORY
August 2017, Originally compiled by Urvashi Mohnani <umohnani@redhat.com>
diff --git a/docs/source/markdown/podman-manifest-add.1.md b/docs/source/markdown/podman-manifest-add.1.md
index 44815def5..c4d4417c4 100644
--- a/docs/source/markdown/podman-manifest-add.1.md
+++ b/docs/source/markdown/podman-manifest-add.1.md
@@ -33,6 +33,25 @@ the image. If *imageName* refers to a manifest list or image index, the
architecture information will be retrieved from it. Otherwise, it will be
retrieved from the image's configuration information.
+**--authfile**=*path*
+
+Path of the authentication file. Default is ${XDG\_RUNTIME\_DIR}/containers/auth.json, which is set using `podman login`.
+If the authorization state is not found there, $HOME/.docker/config.json is checked, which is set using `docker login`. (Not available for remote commands)
+
+Note: You can also override the default path of the authentication file by setting the REGISTRY\_AUTH\_FILE
+environment variable. `export REGISTRY_AUTH_FILE=path`
+
+**--cert-dir**=*path*
+
+Use certificates at *path* (\*.crt, \*.cert, \*.key) to connect to the registry.
+Default certificates directory is _/etc/containers/certs.d_. (Not available for remote commands)
+
+**--creds**=*creds*
+
+The [username[:password]] to use to authenticate with the registry if required.
+If one or both values are not supplied, a command line prompt will appear and the
+value can be entered. The password is entered without echo.
+
**--features**
Specify the features list which the list or index records as requirements for
@@ -50,6 +69,10 @@ configuration information.
Specify the OS version which the list or index records as a requirement for the
image. This option is rarely used.
+**--tls-verify**
+
+Require HTTPS and verify certificates when talking to container registries (defaults to true). (Not available for remote commands)
+
**--variant**
Specify the variant which the list or index records for the image. This option
diff --git a/docs/source/markdown/podman-ps.1.md b/docs/source/markdown/podman-ps.1.md
index 2f8112aab..58d3358e5 100644
--- a/docs/source/markdown/podman-ps.1.md
+++ b/docs/source/markdown/podman-ps.1.md
@@ -32,12 +32,18 @@ all the containers information. By default it lists:
**--all**, **-a**
-Show all the containers, default is only running containers
+Show all the containers created by Podman; the default is to show only running containers.
+
+Note: Podman shares containers storage with other tools such as Buildah and CRI-O. In some cases these `external` containers might also exist in the same storage. Use the `--storage` option to see these external containers. External containers show the 'storage' status.
**--pod**, **-p**
Display the pods the containers are associated with
+**--storage**
+
+Display external containers that are not controlled by Podman but are stored in containers storage. These external containers are generally created via other container technology such as Buildah or CRI-O and may depend on the same container images that Podman is also using. External containers are denoted with either a 'buildah' or 'storage' in the COMMAND and STATUS columns of the ps output. Only used with the --all option.
+
**--no-trunc**
Display the extended information
@@ -174,11 +180,20 @@ CONTAINER ID IMAGE COMMAND CREATED STATUS
```
+```
+$ podman ps --storage -a
+CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
+69ed779d8ef9f redis:alpine "redis-server" 25 hours ago Created 6379/tcp k8s_container1_podsandbox1_redhat.test.crio_redhat-test-crio_1
+38a8a78596f9 docker.io/library/busybox:latest buildah 2 hours ago storage busybox-working-container
+fd7b786b5c32 docker.io/library/alpine:latest buildah 2 hours ago storage alpine-working-container
+f78620804e00 scratch buildah 2 hours ago storage working-container
+```
+
## ps
Print a list of containers
## SEE ALSO
-podman(1)
+podman(1), buildah(1), crio(8)
## HISTORY
August 2017, Originally compiled by Urvashi Mohnani <umohnani@redhat.com>
diff --git a/docs/source/markdown/podman-rm.1.md b/docs/source/markdown/podman-rm.1.md
index cddf06e3e..990af0cd1 100644
--- a/docs/source/markdown/podman-rm.1.md
+++ b/docs/source/markdown/podman-rm.1.md
@@ -45,9 +45,9 @@ The latest option is not supported on the remote client.
**--storage**
-Remove the container from the storage library only.
-This is only possible with containers that are not present in libpod (cannot be seen by **podman ps**).
-It is used to remove containers from **podman build** and **buildah**, and orphan containers which were only partially removed by **podman rm**.
+Remove external containers from the storage library.
+This is only possible with containers that are not present in libpod (they can be seen by **podman ps --all --storage**).
+It is used to remove external containers from **podman build** and **buildah**, and orphan containers which were only partially removed by **podman rm**.
The storage option conflicts with the **--all**, **--latest**, and **--volumes** options.
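+
+For example, removing an external container by ID (an illustrative ID; list yours with `podman ps --all --storage`):
+```
+$ podman rm --storage 38a8a78596f9
+```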
**--volumes**, **-v**
@@ -96,7 +96,7 @@ $ podman rm -f --latest
**125** The command fails for a reason other than container did not exist or is paused/running
## SEE ALSO
-podman(1), podman-image-rm(1)
+podman(1), podman-image-rm(1), podman-ps(1), podman-build(1)
## HISTORY
August 2017, Originally compiled by Ryan Cole <rycole@redhat.com>
diff --git a/docs/source/markdown/podman-save.1.md b/docs/source/markdown/podman-save.1.md
index b2b0995d3..f19c9723a 100644
--- a/docs/source/markdown/podman-save.1.md
+++ b/docs/source/markdown/podman-save.1.md
@@ -40,6 +40,10 @@ Save image to **oci-archive, oci-dir** (directory with oci manifest type), or **
--format docker-dir
```
+**--multi-image-archive**, **-m**
+
+Allow for creating archives with more than one image. Additional names will be interpreted as images instead of tags. Only supported for **docker-archive**.
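+
+A minimal example (assuming local images `alpine` and `busybox` exist):
+```
+$ podman save --multi-image-archive -o images.tar alpine busybox
+```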
+
**--quiet**, **-q**
Suppress the output
diff --git a/docs/tutorials/README.md b/docs/tutorials/README.md
index 4beb069ac..7f7b4853d 100644
--- a/docs/tutorials/README.md
+++ b/docs/tutorials/README.md
@@ -12,9 +12,9 @@ Learn how to setup Podman and perform some basic commands with the utility.
The steps required to setup rootless Podman are enumerated.
-**[Setup on OS X](mac_client.md)**
+**[Setup Mac/Windows](mac_win_client.md)**
-Special setup for running the Podman remote client on a Mac and connecting to Podman running on a Linux VM are documented.
+Special setup for running the Podman remote client on a Mac or Windows PC and connecting to Podman running on a Linux VM are documented.
**[Remote Client](remote_client.md)**
diff --git a/docs/tutorials/mac_client.md b/docs/tutorials/mac_client.md
index f6c9160a8..f406ca54d 100644
--- a/docs/tutorials/mac_client.md
+++ b/docs/tutorials/mac_client.md
@@ -1,99 +1,2 @@
-# Podman Mac Client tutorial
-
-## What is the Podman Mac Client
-
-First and foremost, the Mac Client is under heavy development. We are working on getting the
-Mac client to be packaged and run for a native-like experience. This is the setup tutorial
-for the Mac client at its current stage of development and packaging.
-
-The purpose of the Mac client for Podman is to allow users to run Podman on a Mac. Since Podman is a Linux
-container engine, The Mac client is actually a version of the [Podman-remote client](remote_client.md),
-edited to that the client side works on a Mac machine, and connects to a Podman "backend" on a Linux
-machine, virtual or physical. The goal is to have a native-like experience when working with the Mac
-client, so the command line interface of the remote client is exactly the same as the regular Podman
-commands with the exception of some flags and commands that do not apply to the Mac client.
-
-## What you need
-
-To use the Mac client, you will need a binary built for MacOS and a Podman "backend" on a Linux machine;
-hereafter referred to as the Podman node. In this context, a Podman node is a Linux system with Podman
-installed on it and the varlink service activated. You will also need to be able to ssh into this
-system as a user with privileges to the varlink socket (more on this later).
-
-For best results, use the most recent version of MacOS
-
-## Getting the Mac client
-The Mac client is available through [Homebrew](https://brew.sh/).
-```
-$ brew cask install podman
-```
-
-## Setting up the client and Podman node connection
-
-To use the Mac client, you must perform some setup on both the Mac and Podman nodes. In this case,
-the Mac node refers to the Mac on which Podman is being run; and the Podman node refers to where
-Podman and its storage reside.
-
-### Connection settings
-Your Linux box must have ssh enabled, and you must copy your Mac's public key from `~/.sconf sh/id.pub` to
-`/root/.ssh/authorized_keys` on your Linux box using `ssh-copy-id` This allows for the use of SSH keys
-for remote access.
-
-You may need to edit your `/etc/ssh/sshd_config` in your Linux machine as follows:
-```
-PermitRootLogin yes
-```
-
-Use of SSH keys are strongly encouraged to ensure a secure login. However, if you wish to avoid ‘logging in’ every
-time you run a Podman command, you may edit your `/etc/ssh/sshd_config` on your Linux machine as follows:
-```
-PasswordAuthentication no
-PermitRootLogin without-password
-```
-
-### Podman node setup
-The Podman node must be running a Linux distribution that supports Podman and must have Podman (not the Mac
-client) installed. You must also have root access to the node. Check if your system uses systemd:
-```
-$cat /proc/1/comm
-systemd
-```
-If it does, then simply start the Podman varlink socket:
-```
-$ sudo systemctl start io.podman.socket
-$ sudo systemctl enable io.podman.socket
-```
-
-If your system cannot use systemd, then you can manually establish the varlink socket with the Podman
-command:
-```
-$ sudo podman --log-level debug varlink --timeout 0 unix://run/podman/io.podman
-```
-
-### Required permissions
-For now, the Mac client requires that you be able to run a privileged Podman and have privileged ssh
-access to the remote system. This limitation is being worked on.
-
-#### Running the remote client
-There are three different ways to pass connection information into the client: flags, conf file, and
-environment variables. All three require information on username and a remote host ip address. Most often,
-your username should be root and you can obtain your remote-host-ip using `ip addr`
-
-To connect using flags, you can use
-```
-$ podman --remote-host remote-host-ip --username root images
-REPOSITORY TAG IMAGE ID CREATED SIZE
-quay.io/podman/stable latest 9c1e323be87f 10 days ago 414 MB
-localhost/test latest 4b8c27c343e1 4 weeks ago 253 MB
-k8s.gcr.io/pause 3.1 da86e6ba6ca1 20 months ago 747 kB
-```
-If the conf file is set up, you may simply use Podman as you would on the linux machine. Take a look at
-[podman-remote.conf.5.md](https://github.com/containers/podman/blob/master/docs/podman-remote.conf.5.md) on how to use the conf file:
-
-```
-$ podman images
-REPOSITORY TAG IMAGE ID CREATED SIZE
-quay.io/podman/stable latest 9c1e323be87f 10 days ago 414 MB
-localhost/test latest 4b8c27c343e1 4 weeks ago 253 MB
-k8s.gcr.io/pause 3.1 da86e6ba6ca1 20 months ago 747 kB
-```
+# [Podman Mac Client tutorial](https://github.com/containers/podman/blob/master/docs/tutorials/mac_win_client.md)
+This tutorial has moved! You can find out how to set up Podman on MacOS (as well as Windows) [here](https://github.com/containers/podman/blob/master/docs/tutorials/mac_win_client.md)
diff --git a/docs/tutorials/mac_win_client.md b/docs/tutorials/mac_win_client.md
new file mode 100644
index 000000000..63830a5b1
--- /dev/null
+++ b/docs/tutorials/mac_win_client.md
@@ -0,0 +1,111 @@
+# Podman Remote clients for MacOS and Windows
+
+## Introduction
+
+The core Podman runtime environment can only run on Linux operating systems. Other operating systems can use the “remote client” to manage containers on a Linux backend. This remote client is nearly identical to the standard Podman program. Certain functions that do not make sense for remote clients have been removed. For example, the “--latest” switch for container commands has been removed.
+
+### Brief architecture
+
+The remote client uses a client-server model. You need Podman installed on a Linux machine or VM that also has the SSH daemon running. On the local operating system, when you execute a Podman command, Podman connects to the server via SSH. It then connects to the Podman service by using systemd socket activation. The Podman commands are executed on the server. From the client's point of view, it seems like Podman runs locally.
+
+## Obtaining and installing Podman
+
+### Windows
+
+Installing the Windows Podman client begins by downloading the Podman Windows installer. The Windows installer is built with each Podman release and is downloadable from its [release description page](https://github.com/containers/podman/releases/latest). You can also build the installer from source using the `podman.msi` Makefile target.
+
+Once you have downloaded the installer, simply double-click it and Podman will be installed. The installer also places `podman` on the default user path.
+
+Podman must be run at a command prompt using the Windows "cmd" or PowerShell applications.
+
+### MacOS
+
+The Mac Client is available through [Homebrew](https://brew.sh/). You can install Homebrew via the instructions on its site, then install Podman using:
+```
+$ brew install podman
+```
+
+## Creating the first connection
+
+### Enable the Podman service on the server machine.
+
+Before performing any Podman client commands, you must enable the podman.socket systemd unit on the Linux server. In these examples, we are running Podman as a normal, unprivileged user, also known as a rootless user. By default, the rootless socket listens at `/run/user/${UID}/podman/podman.sock`. You can enable this socket permanently using the following command:
+```
+$ systemctl --user enable podman.socket
+```
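+
+To enable and start the socket in one step, the `--now` flag can be added:
+```
+$ systemctl --user enable --now podman.socket
+```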
+You will need to enable linger for this user in order for the socket to work when the user is not logged in.
+
+```
+$ sudo loginctl enable-linger $USER
+```
+
+You can verify that the socket is listening with a simple Podman command.
+
+```
+$ podman --remote info
+host:
+ arch: amd64
+ buildahVersion: 1.16.0-dev
+ cgroupVersion: v2
+ conmon:
+ package: conmon-2.0.19-1.fc32.x86_64
+```
+
+#### Enable sshd
+
+In order for the client to communicate with the server, you need to enable and start the SSH daemon on your Linux machine, if it is not currently enabled.
+```
+$ sudo systemctl enable --now sshd
+```
+
+#### Setting up SSH
+Remote Podman uses SSH to communicate between the client and server. The remote client works considerably more smoothly using SSH keys. To set up your ssh connection, you need to generate an ssh key pair from your client machine.
+```
+$ ssh-keygen
+```
+Your public key by default should be in your home directory under `.ssh/id_rsa.pub` (`.ssh\id_rsa.pub` on Windows). You then need to copy the contents of id_rsa.pub and append it into ~/.ssh/authorized_keys on the Linux server. On a Mac, you can automate this using ssh-copy-id, as shown below.
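+
+For example (substituting your own user and the server address used below):
+```
+$ ssh-copy-id -i ~/.ssh/id_rsa.pub user@192.168.122.1
+```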
+
+If you do not wish to use SSH keys, you will be prompted with each Podman command for your login password.
+
+## Using the client
+
+The first step in using the Podman remote client is to configure a connection.
+
+You can add a connection by using the `podman system connection add` command.
+
+```
+C:\Users\baude> podman system connection add baude --identity c:\Users\baude\.ssh\id_rsa ssh://192.168.122.1/run/user/1000/podman/podman.sock
+```
+
+This will add a remote connection to Podman and if it is the first connection added, it will mark the connection as the default. You can observe your connections with `podman system connection list`:
+
+```
+C:\Users\baude> podman system connection list
+Name Identity URI
+baude* id_rsa ssh://baude@192.168.122.1/run/user/1000/podman/podman.sock
+```
+
+Now we can test the connection with `podman info`.
+
+```
+C:\Users\baude> podman info
+host:
+ arch: amd64
+ buildahVersion: 1.16.0-dev
+ cgroupVersion: v2
+ conmon:
+ package: conmon-2.0.19-1.fc32.x86_64
+```
+
+Podman has also introduced a “--connection” flag where you can use other connections you have defined. If no connection is provided, the default connection will be used.
+
+```
+C:\Users\baude> podman system connection --help
+```
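+
+For example, to target a specific connection (assuming the `baude` connection added above):
+```
+C:\Users\baude> podman --connection baude info
+```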
+
+## Wrap up
+
+You can use the Podman remote clients to manage your containers running on a Linux server. The communication between client and server relies heavily on SSH connections and the use of SSH keys is encouraged. Once you have Podman installed on your remote client, you should set up a connection using `podman system connection add` which will then be used by subsequent Podman commands.
+
+## History
+Originally published on [Red Hat Enable Sysadmin](https://www.redhat.com/sysadmin/podman-clients-macos-windows)
diff --git a/docs/tutorials/podman_tutorial.md b/docs/tutorials/podman_tutorial.md
index 97268fc41..85b95af04 100644
--- a/docs/tutorials/podman_tutorial.md
+++ b/docs/tutorials/podman_tutorial.md
@@ -5,7 +5,7 @@ Podman is a utility provided as part of the libpod library. It can be used to c
containers. The following tutorial will teach you how to set up Podman and perform some basic
commands with Podman.
-If you are running on a Mac, you should instead follow the [Mac tutorial](https://github.com/containers/podman/blob/master/docs/tutorials/mac_client.md)
+If you are running on a Mac or Windows PC, you should instead follow the [Mac and Windows tutorial](https://github.com/containers/podman/blob/master/docs/tutorials/mac_win_client.md)
to set up the remote Podman client.
**NOTE**: the code samples are intended to be run as a non-root user, and use `sudo` where
diff --git a/docs/tutorials/remote_client.md b/docs/tutorials/remote_client.md
index d4c43dda2..ad506d19a 100644
--- a/docs/tutorials/remote_client.md
+++ b/docs/tutorials/remote_client.md
@@ -1,88 +1,112 @@
# Podman remote-client tutorial
-## What is the remote-client
+## Introduction
+The purpose of the Podman remote-client is to allow users to interact with a Podman "backend" while on a separate client. The command line interface of the remote client is exactly the same as the regular Podman commands with the exception of some flags being removed as they do not apply to the remote-client.
-First and foremost, the remote-client is under heavy development. We are adding new
-commands and functions frequently. We also are working on a rootless implementation that
-does not require privileged users.
+The remote client takes advantage of a client-server model. You need Podman installed on a Linux machine or VM that also has the SSH daemon running. On the local operating system, when you execute a Podman command, Podman connects to the server via SSH. It then connects to the Podman service by using systemd socket activation and our [REST API](https://docs.podman.io/en/latest/_static/api.html). The Podman commands are executed on the server. From the client's point of view, it seems like Podman runs locally.
-The purpose of the Podman remote-client is to allow users to interact with a Podman "backend"
-while on a separate client. The command line interface of the remote client is exactly the
-same as the regular Podman commands with the exception of some flags being removed as they
-do not apply to the remote-client.
+This tutorial is for running Podman remotely on Linux. If you are using a Mac or a Windows PC, please follow the [Mac and Windows tutorial](https://github.com/containers/podman/blob/master/docs/tutorials/mac_win_client.md)
-## What you need
-To use the remote-client, you will need a binary for your client and a Podman "backend"; hereafter
-referred to as the Podman node. In this context, a Podman node is a Linux system with Podman
-installed on it and the varlink service activated. You will also need to be able to ssh into this
-system as a user with privileges to the varlink socket (more on this later).
+## Obtaining and installing Podman
-## Building the remote client
-At this time, the Podman remote-client is not being packaged for any distribution. It must be built from
-source. To set up your build environment, see [Installation notes](https://github.com/containers/podman/blob/master/install.md) and follow the
-section [Building from scratch](https://github.com/containers/podman/blob/master/install.md#building-from-scratch). Once you can successfully
-build the regular Podman binary, you can now build the remote-client.
+### Client machine
+You will need either Podman or the podman-remote client. The difference between the two is that the compiled podman-remote client can only act as a remote client connecting to a backend, while Podman can run local, standard Podman commands, as well as act as a remote client (using `podman --remote`).
+
+If you already have Podman installed, you do not need to install podman-remote.
+
+You can find out how to [install Podman here](https://podman.io/getting-started/installation).
+
+If you would like to install only the podman-remote client, it is downloadable from its [release description page](https://github.com/containers/podman/releases/latest). You can also build it from source using the `make podman-remote` Makefile target.
+
+
+### Server Machine
+You will need to [install Podman](https://podman.io/getting-started/installation) on your server machine.
+
+
+## Creating the first connection
+
+### Enable the Podman service on the server machine.
+
+Before performing any Podman client commands, you must enable the podman.socket systemd unit on the Linux server. In these examples, we are running Podman as a normal, unprivileged user, also known as a rootless user. By default, the rootless socket listens at `/run/user/${UID}/podman/podman.sock`. You can enable this socket permanently using the following command:
```
-$ make podman-remote
+$ systemctl --user enable podman.socket
```
-Like building the regular Podman, the resulting binary will be in the *bin* directory. This is the binary
-you will run on the remote node later in the instructions.
+You will need to enable linger for this user in order for the socket to work when the user is not logged in:
-## Setting up the remote and Podman nodes
+```
+$ sudo loginctl enable-linger $USER
+```
+This is only required if you are not running Podman as root.
-To use the remote-client, you must perform some setup on both the remote and Podman nodes. In this case,
-the remote node refers to where the remote-client is being run; and the Podman node refers to where
-Podman and its storage reside.
+You can verify that the socket is listening with a simple Podman command.
+```
+$ podman --remote info
+host:
+ arch: amd64
+ buildahVersion: 1.16.0-dev
+ cgroupVersion: v2
+ conmon:
+ package: conmon-2.0.19-1.fc32.x86_64
+```
-### Podman node setup
+#### Enable sshd
-Varlink bridge support is provided by the varlink cli command and installed using:
+In order for the Podman client to communicate with the server, you need to enable and start the SSH daemon on your Linux machine, if it is not currently enabled.
```
-$ sudo dnf install varlink-cli
+$ sudo systemctl enable --now sshd
```
-The Podman node must have Podman (not the remote-client) installed as normal. If your system uses systemd,
-then simply start the Podman varlink socket.
+#### Setting up SSH
+Remote Podman uses SSH to communicate between the client and server. The remote client works considerably more smoothly using SSH keys. To set up your ssh connection, you need to generate an ssh key pair from your client machine.
```
-$ sudo systemctl start io.podman.socket
+$ ssh-keygen
```
+Your public key by default should be in your home directory under ~/.ssh/id_rsa.pub. You then need to copy the contents of id_rsa.pub and append it into ~/.ssh/authorized_keys on the Linux server. You can automate this using ssh-copy-id.
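+
+For example (assuming the server is reachable as `myuser@192.168.122.1`, as used below):
+```
+$ ssh-copy-id -i ~/.ssh/id_rsa.pub myuser@192.168.122.1
+```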
+
+If you do not wish to use SSH keys, you will be prompted with each Podman command for your login password.
+
+## Using the client
+
+Note: `podman-remote` is equivalent to `podman --remote` here, depending on what you have chosen to install.
+
+The first step in using the Podman remote client is to configure a connection.
+
+You can add a connection by using the `podman-remote system connection add` command.
-If your system cannot use systemd, then you can manually establish the varlink socket with the Podman
-command:
```
-$ sudo podman --log-level debug varlink --timeout 0 unix://run/podman/io.podman
+$ podman-remote system connection add myuser --identity ~/.ssh/id_rsa ssh://192.168.122.1/run/user/1000/podman/podman.sock
```
-### Required permissions
-For now, the remote-client requires that you be able to run a privileged Podman and have privileged ssh
-access to the remote system. This limitation is being worked on.
+This will add a remote connection to Podman and if it is the first connection added, it will mark the connection as the default. You can observe your connections with `podman-remote system connection list`:
-### Remote node setup
-
-#### Initiate an ssh session to the Podman node
-To use the remote client, an ssh connection to the Podman server must be established.
+```
+$ podman-remote system connection list
+Name Identity URI
+myuser* id_rsa ssh://myuser@192.168.122.1/run/user/1000/podman/podman.sock
+```
-Using the varlink bridge, an ssh tunnel must be initiated to connect to the server. Podman must then be informed of the location of the sshd server on the targeted server
+Now we can test the connection with `podman info`:
```
-$ export PODMAN_VARLINK_BRIDGE=$'ssh -T -p22 root@remotehost -- "varlink -A \'podman varlink \$VARLINK_ADDRESS\' bridge"'
-$ bin/podman-remote images
-REPOSITORY TAG IMAGE ID CREATED SIZE
-docker.io/library/ubuntu latest 47b19964fb50 2 weeks ago 90.7 MB
-docker.io/library/alpine latest caf27325b298 3 weeks ago 5.8 MB
-quay.io/cevich/gcloud_centos latest 641dad61989a 5 weeks ago 489 MB
-k8s.gcr.io/pause 3.1 da86e6ba6ca1 14 months ago 747 kB
+$ podman-remote info
+host:
+ arch: amd64
+ buildahVersion: 1.16.0-dev
+ cgroupVersion: v2
+ conmon:
+ package: conmon-2.0.19-1.fc32.x86_64
```
-The PODMAN_VARLINK_BRIDGE variable may be added to your log in settings. It does not change per connection.
+Podman-remote has also introduced a “--connection” flag where you can use other connections you have defined. If no connection is provided, the default connection will be used.
-If coming from a Windows machine, the PODMAN_VARLINK_BRIDGE is formatted as:
```
-set PODMAN_VARLINK_BRIDGE=C:\Windows\System32\OpenSSH\ssh.exe -T -p22 root@remotehost -- varlink -A "podman varlink $VARLINK_ADDRESS" bridge
+$ podman-remote system connection --help
```
-The arguments before the `--` are presented to ssh while the arguments after are for the varlink cli. The varlink arguments should be copied verbatim.
- - `-p` is the port on the remote host for the ssh tunnel. `22` is the default.
- - `root` is the currently supported user, while `remotehost` is the name or IP address of the host providing the Podman service.
- - `-i` may be added to select an identity file.
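+
+For example, to target a specific connection (assuming the `myuser` connection added above):
+```
+$ podman-remote --connection myuser info
+```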
+## Wrap up
+
+You can use the Podman remote clients to manage your containers running on a Linux server. The communication between client and server relies heavily on SSH connections and the use of SSH keys is encouraged. Once you have Podman installed on your remote client, you should set up a connection using `podman-remote system connection add` which will then be used by subsequent Podman commands.
+
+## History
+Adapted from the [Mac and Windows tutorial](https://github.com/containers/podman/blob/master/docs/tutorials/mac_win_client.md).
diff --git a/docs/tutorials/varlink_remote_client.md b/docs/tutorials/varlink_remote_client.md
new file mode 100644
index 000000000..54c648a48
--- /dev/null
+++ b/docs/tutorials/varlink_remote_client.md
@@ -0,0 +1,89 @@
+# Podman varlink remote-client tutorial [DEPRECATED]
+
+## What is the varlink client
+
+This API has been deprecated in favor of the [REST API](https://docs.podman.io/en/latest/_static/api.html).
+For usage on Windows and Mac, please reference the [Podman Mac/Windows tutorial](https://github.com/containers/podman/blob/master/docs/tutorials/mac_win_client.md).
+Varlink support is in maintenance mode and will be removed in a future release.
+For more details, see [this blog](https://podman.io/blogs/2020/01/17/podman-new-api.html).
+
+The purpose of the Podman remote-client is to allow users to interact with a Podman "backend"
+while on a separate client. The command line interface of the remote client is exactly the
+same as the regular Podman commands, with the exception of some flags that are removed because they
+do not apply to the remote-client.
+
+## What you need
+To use the remote-client, you will need a binary for your client and a Podman "backend", hereafter
+referred to as the Podman node. In this context, a Podman node is a Linux system with Podman
+installed on it and the varlink service activated. You will also need to be able to ssh into this
+system as a user with privileges to the varlink socket (more on this later).
+
+## Building the remote client
+At this time, the Podman remote-client is not being packaged for any distribution. It must be built from
+source. To set up your build environment, see [Installation notes](https://github.com/containers/podman/blob/master/install.md) and follow the
+section [Building from scratch](https://github.com/containers/podman/blob/master/install.md#building-from-scratch). Once you can successfully
+build the regular Podman binary, you can build the remote-client.
+```
+$ make podman-remote
+```
+As with the regular Podman build, the resulting binary will be in the *bin* directory. This is the binary
+you will run on the remote node later in the instructions.
+
+## Setting up the remote and Podman nodes
+
+To use the remote-client, you must perform some setup on both the remote and Podman nodes. In this case,
+the remote node refers to where the remote-client is being run, and the Podman node refers to where
+Podman and its storage reside.
+
+### Podman node setup
+
+Varlink bridge support is provided by the varlink CLI command, which can be installed with:
+```
+$ sudo dnf install varlink-cli
+```
+
+The Podman node must have Podman (not the remote-client) installed as normal. If your system uses systemd,
+then simply start the Podman varlink socket.
+```
+$ sudo systemctl start io.podman.socket
+```
+
+If your system cannot use systemd, then you can manually establish the varlink socket with the Podman
+command:
+```
+$ sudo podman --log-level debug varlink --timeout 0 unix://run/podman/io.podman
+```
+
+### Required permissions
+For now, the remote-client requires that you be able to run a privileged Podman and have privileged ssh
+access to the remote system. This limitation is being worked on.
+
+### Remote node setup
+
+#### Initiate an ssh session to the Podman node
+To use the remote client, an ssh connection to the Podman server must be established.
+
+To use the varlink bridge, an ssh tunnel must first be initiated to connect to the server. Podman must then be informed of the location of the sshd server on the target host.
+
+```
+$ export PODMAN_VARLINK_BRIDGE=$'ssh -T -p22 root@remotehost -- "varlink -A \'podman varlink \$VARLINK_ADDRESS\' bridge"'
+$ bin/podman-remote images
+REPOSITORY TAG IMAGE ID CREATED SIZE
+docker.io/library/ubuntu latest 47b19964fb50 2 weeks ago 90.7 MB
+docker.io/library/alpine latest caf27325b298 3 weeks ago 5.8 MB
+quay.io/cevich/gcloud_centos latest 641dad61989a 5 weeks ago 489 MB
+k8s.gcr.io/pause 3.1 da86e6ba6ca1 14 months ago 747 kB
+```
+
+The PODMAN_VARLINK_BRIDGE variable may be added to your login settings; it does not change per connection.
+
+If connecting from a Windows machine, PODMAN_VARLINK_BRIDGE is formatted as:
+```
+set PODMAN_VARLINK_BRIDGE=C:\Windows\System32\OpenSSH\ssh.exe -T -p22 root@remotehost -- varlink -A "podman varlink $VARLINK_ADDRESS" bridge
+```
+
+The arguments before the `--` are passed to ssh, while the arguments after it are for the varlink CLI. The varlink arguments should be copied verbatim.
+ - `-p` is the port on the remote host for the ssh tunnel. `22` is the default.
+ - `root` is the currently supported user, while `remotehost` is the name or IP address of the host providing the Podman service.
+ - `-i` may be added to select an identity file (see the example below).
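+
+For example, a bridge using a non-default port and an identity file might look like this (the port and key path are illustrative):
+```
+$ export PODMAN_VARLINK_BRIDGE=$'ssh -T -p2222 -i ~/.ssh/id_rsa root@remotehost -- "varlink -A \'podman varlink \$VARLINK_ADDRESS\' bridge"'
+```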
diff --git a/go.mod b/go.mod
index 5fe7d8d53..a4a7aa41f 100644
--- a/go.mod
+++ b/go.mod
@@ -11,11 +11,11 @@ require (
github.com/containernetworking/cni v0.8.0
github.com/containernetworking/plugins v0.8.7
github.com/containers/buildah v1.15.1-0.20200813183340-0a8dc1f8064c
- github.com/containers/common v0.20.3-0.20200827091701-a550d6a98aa3
+ github.com/containers/common v0.21.0
github.com/containers/conmon v2.0.20+incompatible
github.com/containers/image/v5 v5.5.2
github.com/containers/psgo v1.5.1
- github.com/containers/storage v1.23.2
+ github.com/containers/storage v1.23.5
github.com/coreos/go-systemd/v22 v22.1.0
github.com/cri-o/ocicni v0.2.0
github.com/cyphar/filepath-securejoin v0.2.2
@@ -29,14 +29,14 @@ require (
github.com/godbus/dbus/v5 v5.0.3
github.com/google/shlex v0.0.0-20181106134648-c34317bd91bf
github.com/google/uuid v1.1.2
- github.com/gorilla/mux v1.7.4
+ github.com/gorilla/mux v1.8.0
github.com/gorilla/schema v1.2.0
github.com/hashicorp/go-multierror v1.1.0
github.com/hpcloud/tail v1.0.0
github.com/json-iterator/go v1.1.10
github.com/mrunalp/fileutils v0.0.0-20171103030105-7d4729fb3618
- github.com/onsi/ginkgo v1.14.0
- github.com/onsi/gomega v1.10.1
+ github.com/onsi/ginkgo v1.14.1
+ github.com/onsi/gomega v1.10.2
github.com/opencontainers/go-digest v1.0.0
github.com/opencontainers/image-spec v1.0.2-0.20190823105129-775207bd45b6
github.com/opencontainers/runc v1.0.0-rc91.0.20200708210054-ce54a9d4d79b
@@ -60,8 +60,10 @@ require (
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9
golang.org/x/net v0.0.0-20200707034311-ab3426394381
golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a
- golang.org/x/sys v0.0.0-20200728102440-3e129f6d46b1
+ golang.org/x/sys v0.0.0-20200810151505-1b9f1253b3ed
k8s.io/api v0.0.0-20190620084959-7cf5895f2711
- k8s.io/apimachinery v0.19.0
+ k8s.io/apimachinery v0.19.1
k8s.io/client-go v0.0.0-20190620085101-78d2af792bab
)
+
+replace github.com/containers/image/v5 => github.com/containers/image/v5 v5.5.2-0.20200902171422-1c313b2d23e0
diff --git a/go.sum b/go.sum
index d55bce0fc..9bb058dc8 100644
--- a/go.sum
+++ b/go.sum
@@ -52,7 +52,6 @@ github.com/containerd/cgroups v0.0.0-20190919134610-bf292b21730f h1:tSNMc+rJDfmY
github.com/containerd/cgroups v0.0.0-20190919134610-bf292b21730f/go.mod h1:OApqhQ4XNSNC13gXIwDjhOQxjWa/NxkwZXJ1EvqT0ko=
github.com/containerd/console v0.0.0-20180822173158-c12b1e7919c1/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw=
github.com/containerd/console v1.0.0/go.mod h1:8Pf4gM6VEbTNRIT26AyyU7hxdQU3MvAvxVI0sc00XBE=
-github.com/containerd/containerd v1.2.10/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
github.com/containerd/containerd v1.3.0-beta.2.0.20190828155532-0293cbd26c69/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
github.com/containerd/containerd v1.3.0/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
github.com/containerd/containerd v1.3.2 h1:ForxmXkA6tPIvffbrDAcPUIB32QgXkt2XFj+F0UxetA=
@@ -72,24 +71,22 @@ github.com/containernetworking/plugins v0.8.7/go.mod h1:R7lXeZaBzpfqapcAbHRW8/CY
github.com/containers/buildah v1.15.1-0.20200813183340-0a8dc1f8064c h1:elGbJcB3UjBdk7fBxfAzUNS3IT288U1Dzm0gmhgsnB8=
github.com/containers/buildah v1.15.1-0.20200813183340-0a8dc1f8064c/go.mod h1:+IklBLPix5wxPEWn26aDay5f5q4A5VtmNjkdyK5YVsI=
github.com/containers/common v0.19.0/go.mod h1:+NUHV8V5Kmo260ja9Dxtr8ialrDnK4RNzyeEbSgmLac=
-github.com/containers/common v0.20.3-0.20200827091701-a550d6a98aa3 h1:rTSiIMOH3fbCBN+2L8Xr9BJ19AejEIaBQvzkAXZCz/k=
-github.com/containers/common v0.20.3-0.20200827091701-a550d6a98aa3/go.mod h1:z5HJtHWU8sopAHO0Od5s9EpVkXPrLIcNszVvN1Fc3fQ=
+github.com/containers/common v0.21.0 h1:v2U9MrGw0vMgefQf0/uJYBsSnengxLbSORYqhCVEBs0=
+github.com/containers/common v0.21.0/go.mod h1:8w8SVwc+P2p1MOnRMbSKNWXt1Iwd2bKFu2LLZx55DTM=
github.com/containers/conmon v2.0.20+incompatible h1:YbCVSFSCqFjjVwHTPINGdMX1F6JXHGTUje2ZYobNrkg=
github.com/containers/conmon v2.0.20+incompatible/go.mod h1:hgwZ2mtuDrppv78a/cOBNiCm6O0UMWGx1mu7P00nu5I=
-github.com/containers/image/v5 v5.5.1/go.mod h1:4PyNYR0nwlGq/ybVJD9hWlhmIsNra4Q8uOQX2s6E2uM=
-github.com/containers/image/v5 v5.5.2 h1:fv7FArz0zUnjH0W0l8t90CqWFlFcQrPP6Pug+9dUtVI=
-github.com/containers/image/v5 v5.5.2/go.mod h1:4PyNYR0nwlGq/ybVJD9hWlhmIsNra4Q8uOQX2s6E2uM=
+github.com/containers/image/v5 v5.5.2-0.20200902171422-1c313b2d23e0 h1:MJ0bKRn2I5I2NJlVzMU7/eP/9yfMCeWaUskl6zgY/nc=
+github.com/containers/image/v5 v5.5.2-0.20200902171422-1c313b2d23e0/go.mod h1:pBnp9KTyDqM84XTHwmk2lXRvTL6jayAQS47GC6PaPGM=
github.com/containers/libtrust v0.0.0-20190913040956-14b96171aa3b h1:Q8ePgVfHDplZ7U33NwHZkrVELsZP5fYj9pM5WBZB2GE=
github.com/containers/libtrust v0.0.0-20190913040956-14b96171aa3b/go.mod h1:9rfv8iPl1ZP7aqh9YA68wnZv2NUDbXdcdPHVz0pFbPY=
-github.com/containers/ocicrypt v1.0.2/go.mod h1:nsOhbP19flrX6rE7ieGFvBlr7modwmNjsqWarIUce4M=
github.com/containers/ocicrypt v1.0.3 h1:vYgl+RZ9Q3DPMuTfxmN+qp0X2Bj52uuY2vnt6GzVe1c=
github.com/containers/ocicrypt v1.0.3/go.mod h1:CUBa+8MRNL/VkpxYIpaMtgn1WgXGyvPQj8jcy0EVG6g=
github.com/containers/psgo v1.5.1 h1:MQNb7FLbXqBdqz6u4lI2QWizVz4RSTzs1+Nk9XT1iVA=
github.com/containers/psgo v1.5.1/go.mod h1:2ubh0SsreMZjSXW1Hif58JrEcFudQyIy9EzPUWfawVU=
-github.com/containers/storage v1.20.2/go.mod h1:oOB9Ie8OVPojvoaKWEGSEtHbXUAs+tSyr7RO7ZGteMc=
github.com/containers/storage v1.23.0/go.mod h1:I1EIAA7B4OwWRSA0b4yq2AW1wjvvfcY0zLWQuwTa4zw=
-github.com/containers/storage v1.23.2 h1:GPZ8PXYezML1gmZ/uFaXQpyps7AH645lmdvvOJwJYNc=
-github.com/containers/storage v1.23.2/go.mod h1:AyTMMiE5ANvZJiqvatQgSZ85wAl5yHucY3NDN/kemr4=
+github.com/containers/storage v1.23.3/go.mod h1:0azTMiuBhArp/VUmH1o4DJAGaaH+qLtEu17pJ/iKJCg=
+github.com/containers/storage v1.23.5 h1:He9I6y1vRVXYoQg4v2Q9HFAcX4dI3V5MCCrjeBcjkCY=
+github.com/containers/storage v1.23.5/go.mod h1:ha26Q6ngehFNhf3AWoXldvAvwI4jFe3ETQAf/CeZPyM=
github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
github.com/coreos/go-iptables v0.4.5 h1:DpHb9vJrZQEFMcVLFKAAGMUVX0XoRC0ptCthinRYm38=
@@ -152,7 +149,6 @@ github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWo
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
github.com/fsouza/go-dockerclient v1.6.5 h1:vuFDnPcds3LvTWGYb9h0Rty14FLgkjHZdwLDROCdgsw=
github.com/fsouza/go-dockerclient v1.6.5/go.mod h1:GOdftxWLWIbIWKbIMDroKFJzPdg6Iw7r+jX1DDZdVsA=
-github.com/fullsailor/pkcs7 v0.0.0-20190404230743-d7302db945fa/go.mod h1:KnogPXtdwXqoenmZCw6S+25EAm2MkxbG0deNDu4cbSA=
github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
@@ -211,15 +207,15 @@ github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/
github.com/google/shlex v0.0.0-20181106134648-c34317bd91bf h1:7+FW5aGwISbqUtkfmIpZJGRgNFg2ioYPvFaUxdqpDsg=
github.com/google/shlex v0.0.0-20181106134648-c34317bd91bf/go.mod h1:RpwtwJQFrIEPstU94h88MWPXP2ektJZ8cZ0YntAmXiE=
github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY=
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.1.2 h1:EVhdT+1Kseyi1/pUmXKaFxYsDNy9RQYkMWRH68J/W7Y=
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY=
github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg=
github.com/gophercloud/gophercloud v0.0.0-20190126172459-c818fa66e4c8/go.mod h1:3WdhXV3rUYy9p6AUW8d94kr+HS62Y4VL9mBnFxsD8q4=
-github.com/gorilla/mux v1.7.4 h1:VuZ8uybHlWmqV03+zRzdwKL4tUnIp1MAQtp1mIFE1bc=
github.com/gorilla/mux v1.7.4/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=
+github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI=
+github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=
github.com/gorilla/schema v1.2.0 h1:YufUaxZYCKGFuAq3c96BOhjgd5nmXiOY9NGzF247Tsc=
github.com/gorilla/schema v1.2.0/go.mod h1:kgLaKoK1FELgZqMAVxx/5cbj0kT+57qxUrAlIO2eleU=
github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
@@ -231,7 +227,6 @@ github.com/hashicorp/errwrap v0.0.0-20141028054710-7554cd9344ce/go.mod h1:YH+1FK
github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA=
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
github.com/hashicorp/go-multierror v0.0.0-20161216184304-ed905158d874/go.mod h1:JMRHfdO9jKNzS/+BTlxCjKNQHg/jZAft8U7LloJvN7I=
-github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk=
github.com/hashicorp/go-multierror v1.1.0 h1:B9UzwGQJehnUY1yNrnwREHc3fGbC2xefo8g4TbElacI=
github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA=
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
@@ -241,8 +236,8 @@ github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T
github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
-github.com/imdario/mergo v0.3.9 h1:UauaLniWCFHWd+Jp9oCEkTBj8VO/9DKg3PV3VCNMDIg=
-github.com/imdario/mergo v0.3.9/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
+github.com/imdario/mergo v0.3.11 h1:3tnifQM4i+fbajXKBHXWEH+KvNHqojZ778UH75j3bGA=
+github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM=
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
github.com/insomniacslk/dhcp v0.0.0-20200420235442-ed3125c2efe7/go.mod h1:CfMdguCK66I5DAUJgGKyNz8aB6vO5dZzkm9Xep6WGvw=
@@ -260,13 +255,14 @@ github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7V
github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
-github.com/klauspost/compress v1.10.7/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
-github.com/klauspost/compress v1.10.8/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
github.com/klauspost/compress v1.10.10/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
github.com/klauspost/compress v1.10.11 h1:K9z59aO18Aywg2b/WSgBaUX99mHy2BES18Cr5lBKZHk=
github.com/klauspost/compress v1.10.11/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
-github.com/klauspost/pgzip v1.2.4 h1:TQ7CNpYKovDOmqzRHKxJh0BeaBI7UdQZYc6p7pMQh1A=
+github.com/klauspost/compress v1.11.0 h1:wJbzvpYMVGG9iTI9VxpnNZfd4DzMPoCWze3GgSqz8yg=
+github.com/klauspost/compress v1.11.0/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
github.com/klauspost/pgzip v1.2.4/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
+github.com/klauspost/pgzip v1.2.5 h1:qnWYvvKqedOF2ulHpMG72XQol4ILEJ8k2wwRl/Km8oE=
+github.com/klauspost/pgzip v1.2.5/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/konsorten/go-windows-terminal-sequences v1.0.3 h1:CE8S1cTafDpPvMhIxNJKvHsGVBgn1xWYf1NbHQhywc8=
github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
@@ -278,8 +274,6 @@ github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
-github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHXY=
-github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
github.com/mattn/go-runewidth v0.0.9 h1:Lm995f3rfxdpd6TSmuVCHVb/QhupuXlYr8sCI/QdE+0=
github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
github.com/mattn/go-shellwords v1.0.3/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o=
@@ -291,6 +285,7 @@ github.com/mistifyio/go-zfs v2.1.1+incompatible h1:gAMO1HM9xBRONLHHYnu5iFsOJUiJd
github.com/mistifyio/go-zfs v2.1.1+incompatible/go.mod h1:8AuVvqP/mXw1px98n46wfvcGfQ4ci2FwoAjKYxuo3Z4=
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
+github.com/moby/sys/mountinfo v0.1.3 h1:KIrhRO14+AkwKvG/g2yIpNMOUVZ02xNhOw8KY1WsLOI=
github.com/moby/sys/mountinfo v0.1.3/go.mod h1:w2t2Avltqx8vE7gX5l+QiBKxODu2TX0+Syr3h52Tw4o=
github.com/moby/vpnkit v0.4.0/go.mod h1:KyjUrL9cb6ZSNNAUwZfqRjhwwgJ3BJN+kXh0t43WTUQ=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
@@ -320,8 +315,9 @@ github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+W
github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
-github.com/onsi/ginkgo v1.14.0 h1:2mOpI4JVVPBN+WQRa0WKH2eXR+Ey+uK4n7Zj0aYpIQA=
github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY=
+github.com/onsi/ginkgo v1.14.1 h1:jMU0WaQrP0a/YAEq8eJmJKjBoMs+pClEr1vDMlM/Do4=
+github.com/onsi/ginkgo v1.14.1/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY=
github.com/onsi/gomega v0.0.0-20151007035656-2152b45fa28a/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
github.com/onsi/gomega v0.0.0-20190113212917-5533ce8a0da3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
@@ -330,6 +326,8 @@ github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1Cpa
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
github.com/onsi/gomega v1.10.1 h1:o0+MgICZLuZ7xjH7Vx6zS/zcu93/BEp1VwkIW1mEXCE=
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
+github.com/onsi/gomega v1.10.2 h1:aY/nuoWlKJud2J6U0E3NWsjlg+0GtwXxgEqthRdzlcs=
+github.com/onsi/gomega v1.10.2/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
@@ -340,7 +338,6 @@ github.com/opencontainers/image-spec v1.0.2-0.20190823105129-775207bd45b6/go.mod
github.com/opencontainers/runc v0.0.0-20190115041553-12f6a991201f/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
github.com/opencontainers/runc v0.0.0-20190425234816-dae70e8efea4/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
github.com/opencontainers/runc v0.1.1/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
-github.com/opencontainers/runc v1.0.0-rc90/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
github.com/opencontainers/runc v1.0.0-rc91/go.mod h1:3Sm6Dt7OT8z88EbdQqqcRN2oCT54jbi72tT/HqgflT8=
github.com/opencontainers/runc v1.0.0-rc91.0.20200708210054-ce54a9d4d79b h1:wjSgG2Z5xWv1wpAI7JbwKR9aJH0p4HJ+ROZ7ViKh9qU=
github.com/opencontainers/runc v1.0.0-rc91.0.20200708210054-ce54a9d4d79b/go.mod h1:ZuXhqlr4EiRYgDrBDNfSbE4+n9JX4+V107NwAmF7sZA=
@@ -353,7 +350,6 @@ github.com/opencontainers/runtime-tools v0.0.0-20181011054405-1d69bd0f9c39/go.mo
github.com/opencontainers/runtime-tools v0.9.0 h1:FYgwVsKRI/H9hU32MJ/4MLOzXWodKK5zsQavY8NPMkU=
github.com/opencontainers/runtime-tools v0.9.0/go.mod h1:r3f7wjNzSs2extwzU3Y+6pKfobzPh+kKFJ3ofN+3nfs=
github.com/opencontainers/selinux v1.5.1/go.mod h1:yTcKuYAh6R95iDpefGLQaPaRwJFwyzAJufJyiTt7s0g=
-github.com/opencontainers/selinux v1.5.2/go.mod h1:yTcKuYAh6R95iDpefGLQaPaRwJFwyzAJufJyiTt7s0g=
github.com/opencontainers/selinux v1.6.0 h1:+bIAS/Za3q5FTwWym4fTB0vObnfCf3G/NC7K6Jx62mY=
github.com/opencontainers/selinux v1.6.0/go.mod h1:VVGKuOLlE7v4PJyT6h7mNWvq1rzqiriPsEqVhc+svHE=
github.com/openshift/imagebuilder v1.1.6 h1:1+YzRxIIefY4QqtCImx6rg+75QrKNfBoPAKxgMo/khM=
@@ -439,7 +435,6 @@ github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXf
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
-github.com/stretchr/testify v1.6.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0=
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
@@ -454,8 +449,8 @@ github.com/uber/jaeger-client-go v2.25.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMW
github.com/uber/jaeger-lib v2.2.0+incompatible h1:MxZXOiR2JuoANZ3J6DE/U0kSFv/eJ/GfSYVCjK7dyaw=
github.com/uber/jaeger-lib v2.2.0+incompatible/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U=
github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc=
-github.com/ulikunitz/xz v0.5.7 h1:YvTNdFzX6+W5m9msiYg/zpkSURPPtOlzbqYjrFn7Yt4=
-github.com/ulikunitz/xz v0.5.7/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14=
+github.com/ulikunitz/xz v0.5.8 h1:ERv8V6GKqVi23rgu5cj9pVfVzJbOqAY2Ntl88O6c2nQ=
+github.com/ulikunitz/xz v0.5.8/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14=
github.com/urfave/cli v0.0.0-20171014202726-7bc6a0acffa5/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
github.com/urfave/cli/v2 v2.2.0/go.mod h1:SE9GqnLQmjVa0iPEY0f1w3ygNIYcIJ0OKPMoW2caLfQ=
@@ -463,8 +458,8 @@ github.com/varlink/go v0.0.0-20190502142041-0f1d566d194b h1:hdDRrn9OP/roL8a/e/5Z
github.com/varlink/go v0.0.0-20190502142041-0f1d566d194b/go.mod h1:YHaw8N660ESgMgLOZfLQqT1htFItynAUxMesFBho52s=
github.com/vbatts/tar-split v0.11.1 h1:0Odu65rhcZ3JZaPHxl7tCI3V/C/Q9Zf82UFravl02dE=
github.com/vbatts/tar-split v0.11.1/go.mod h1:LEuURwDEiWjRjwu46yU3KVGuUdVv/dcnpcEPSzR8z6g=
-github.com/vbauerster/mpb/v5 v5.2.2 h1:zIICVOm+XD+uV6crpSORaL6I0Q1WqOdvxZTp+r3L9cw=
-github.com/vbauerster/mpb/v5 v5.2.2/go.mod h1:W5Fvgw4dm3/0NhqzV8j6EacfuTe5SvnzBRwiXxDR9ww=
+github.com/vbauerster/mpb/v5 v5.3.0 h1:vgrEJjUzHaSZKDRRxul5Oh4C72Yy/5VEMb0em+9M0mQ=
+github.com/vbauerster/mpb/v5 v5.3.0/go.mod h1:4yTkvAb8Cm4eylAp6t0JRq6pXDkFJ4krUlDqWYkakAs=
github.com/vishvananda/netlink v0.0.0-20181108222139-023a6dafdcdf/go.mod h1:+SR5DhBJrl6ZM7CoCKvpw5BKroDKQ+PJqOg65H/2ktk=
github.com/vishvananda/netlink v1.1.0 h1:1iyaYNBLmP6L0220aDnYQpo1QEV4t4hJ+xEEhhJH8j0=
github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE=
@@ -484,7 +479,6 @@ github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQ
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
-go.etcd.io/bbolt v1.3.4/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ=
go.etcd.io/bbolt v1.3.5 h1:XAzx9gjCb0Rxj7EoqcClPD1d5ZBxZJk0jbuoPHenBt0=
go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ=
go.mozilla.org/pkcs7 v0.0.0-20200128120323-432b2356ecb1 h1:A/5uWzF44DlIgdm/PQFwfMkW0JX+cIcQi/SwLAmZP5M=
@@ -502,7 +496,6 @@ golang.org/x/crypto v0.0.0-20181025213731-e84da0312774/go.mod h1:6SG95UA2DQfeDnf
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
-golang.org/x/crypto v0.0.0-20200423211502-4bdfaf469ed5 h1:Q7tZBpemrlsc2I7IyODzhtallWRSm4Q0d09pL6XbQtU=
golang.org/x/crypto v0.0.0-20200423211502-4bdfaf469ed5/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 h1:psW17arqaxU48Z5kZ0CQnkZWQJsqcURM6tKiBApRjXI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
@@ -527,7 +520,6 @@ golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLL
golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
-golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7 h1:AeiKBIuRw3UomYXSbLy0Mc2dDLfdtbT/IVn4keq83P0=
golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200707034311-ab3426394381 h1:VXak5I6aEWmAXeQjA+QSZzlgNrpq9mjcfDemuexIKsU=
golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
@@ -561,7 +553,6 @@ golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191115151921-52ab43148777/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -570,8 +561,9 @@ golang.org/x/sys v0.0.0-20200501145240-bc7a7d42d5c3/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200622214017-ed371f2e16b4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200720211630-cb9d2d5c5666/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200728102440-3e129f6d46b1 h1:sIky/MyNRSHTrdxfsiUSS4WIAMvInbeXljJz+jDjeYE=
golang.org/x/sys v0.0.0-20200728102440-3e129f6d46b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200810151505-1b9f1253b3ed h1:WBkVNH1zd9jg/dK4HCM4lNANnmd12EHC9z+LmcCG4ns=
+golang.org/x/sys v0.0.0-20200810151505-1b9f1253b3ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
@@ -598,7 +590,6 @@ google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
-google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55 h1:gSJIx1SDwno+2ElGhA4+qG2zF97qiUzTM+rQ0klBOcE=
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013 h1:+kGHl1aib/qcwaRi1CbqBZ1rk19r85MNUf8HaBghugY=
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
@@ -607,7 +598,6 @@ google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiq
google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
-google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA=
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.27.1 h1:zvIju4sqAGvwKspUQOhwnpcqSbzi7/H6QomNNjTL4sk=
google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
@@ -617,7 +607,6 @@ google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQ
google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
-google.golang.org/protobuf v1.23.0 h1:4MY060fB1DLGMB/7MBTLnwQUY6+F09GEiz6SsrNqyzM=
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.24.0 h1:UhZDfRO8JRQru4/+LlLE0BRKGF8L+PICnvYZmx/fEGA=
@@ -656,8 +645,8 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh
k8s.io/api v0.0.0-20190620084959-7cf5895f2711 h1:BblVYz/wE5WtBsD/Gvu54KyBUTJMflolzc5I2DTvh50=
k8s.io/api v0.0.0-20190620084959-7cf5895f2711/go.mod h1:TBhBqb1AWbBQbW3XRusr7n7E4v2+5ZY8r8sAMnyFC5A=
k8s.io/apimachinery v0.0.0-20190612205821-1799e75a0719/go.mod h1:I4A+glKBHiTgiEjQiCCQfCAIcIMFGt291SmsvcrFzJA=
-k8s.io/apimachinery v0.19.0 h1:gjKnAda/HZp5k4xQYjL0K/Yb66IvNqjthCb03QlKpaQ=
-k8s.io/apimachinery v0.19.0/go.mod h1:DnPGDnARWFvYa3pMHgSxtbZb7gpzzAZ1pTfaUNDVlmA=
+k8s.io/apimachinery v0.19.1 h1:cwsxZazM/LA9aUsBaL4bRS5ygoM6bYp8dFk22DSYQa4=
+k8s.io/apimachinery v0.19.1/go.mod h1:DnPGDnARWFvYa3pMHgSxtbZb7gpzzAZ1pTfaUNDVlmA=
k8s.io/client-go v0.0.0-20190620085101-78d2af792bab h1:E8Fecph0qbNsAbijJJQryKu4Oi9QTp5cVpjTE+nqg6g=
k8s.io/client-go v0.0.0-20190620085101-78d2af792bab/go.mod h1:E95RaSlHr79aHaX0aGSwcPNfygDiPKOVXdmivCIZT0k=
k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
diff --git a/libpod/container_internal.go b/libpod/container_internal.go
index c41d81a2b..5a0a0edfa 100644
--- a/libpod/container_internal.go
+++ b/libpod/container_internal.go
@@ -380,6 +380,8 @@ func (c *Container) setupStorageMapping(dest, from *storage.IDMappingOptions) {
}
dest.GIDMap = append(dest.GIDMap, g)
}
+ dest.HostUIDMapping = false
+ dest.HostGIDMapping = false
}
}
@@ -957,8 +959,10 @@ func (c *Container) completeNetworkSetup() error {
if err := c.syncContainer(); err != nil {
return err
}
- if c.config.NetMode.IsSlirp4netns() {
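+	// Rootless containers always set up networking through the rootless
+	// network namespace helper; root containers may still explicitly
+	// request slirp4netns.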
+ if rootless.IsRootless() {
return c.runtime.setupRootlessNetNS(c)
+ } else if c.config.NetMode.IsSlirp4netns() {
+ return c.runtime.setupSlirp4netns(c)
}
if err := c.runtime.setupNetNS(c); err != nil {
return err
diff --git a/libpod/container_internal_linux.go b/libpod/container_internal_linux.go
index c61c1a325..86a28c176 100644
--- a/libpod/container_internal_linux.go
+++ b/libpod/container_internal_linux.go
@@ -36,7 +36,7 @@ import (
"github.com/containers/podman/v2/utils"
"github.com/containers/storage/pkg/archive"
securejoin "github.com/cyphar/filepath-securejoin"
- User "github.com/opencontainers/runc/libcontainer/user"
+ runcuser "github.com/opencontainers/runc/libcontainer/user"
spec "github.com/opencontainers/runtime-spec/specs-go"
"github.com/opencontainers/runtime-tools/generate"
"github.com/opencontainers/selinux/go-selinux/label"
@@ -84,7 +84,11 @@ func (c *Container) prepare() error {
// Set up network namespace if not already set up
noNetNS := c.state.NetNS == nil
if c.config.CreateNetNS && noNetNS && !c.config.PostConfigureNetNS {
- netNS, networkStatus, createNetNSErr = c.runtime.createNetNS(c)
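+		// Rootless containers that join CNI networks get their namespace
+		// from the rootless CNI infra container rather than creating one
+		// directly.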
+ if rootless.IsRootless() && len(c.config.Networks) > 0 {
+ netNS, networkStatus, createNetNSErr = AllocRootlessCNI(context.Background(), c)
+ } else {
+ netNS, networkStatus, createNetNSErr = c.runtime.createNetNS(c)
+ }
if createNetNSErr != nil {
return
}
@@ -98,8 +102,12 @@ func (c *Container) prepare() error {
}
// handle rootless network namespace setup
- if noNetNS && c.config.NetMode.IsSlirp4netns() && !c.config.PostConfigureNetNS {
- createNetNSErr = c.runtime.setupRootlessNetNS(c)
+ if noNetNS && !c.config.PostConfigureNetNS {
+ if rootless.IsRootless() {
+ createNetNSErr = c.runtime.setupRootlessNetNS(c)
+ } else if c.config.NetMode.IsSlirp4netns() {
+ createNetNSErr = c.runtime.setupSlirp4netns(c)
+ }
}
}()
// Mount storage if not mounted
@@ -563,7 +571,7 @@ func (c *Container) generateSpec(ctx context.Context) (*spec.Spec, error) {
// systemd expects to have /run, /run/lock and /tmp on tmpfs
// It also expects to be able to write to /sys/fs/cgroup/systemd and /var/log/journal
func (c *Container) setupSystemd(mounts []spec.Mount, g generate.Generator) error {
- options := []string{"rw", "rprivate", "noexec", "nosuid", "nodev"}
+ options := []string{"rw", "rprivate", "nosuid", "nodev"}
for _, dest := range []string{"/run", "/run/lock"} {
if MountExists(mounts, dest) {
continue
@@ -627,7 +635,7 @@ func (c *Container) setupSystemd(mounts []spec.Mount, g generate.Generator) erro
Destination: "/sys/fs/cgroup/systemd",
Type: "bind",
Source: "/sys/fs/cgroup/systemd",
- Options: []string{"bind", "nodev", "noexec", "nosuid", "rprivate"},
+ Options: []string{"bind", "nodev", "nosuid", "rprivate"},
}
g.AddMount(systemdMnt)
g.AddLinuxMaskedPaths("/sys/fs/cgroup/systemd/release_agent")
@@ -1268,7 +1276,7 @@ func (c *Container) makeBindMounts() error {
// SHM is always added when we mount the container
c.state.BindMounts["/dev/shm"] = c.config.ShmDir
- newPasswd, err := c.generatePasswd()
+ newPasswd, newGroup, err := c.generatePasswdAndGroup()
if err != nil {
return errors.Wrapf(err, "error creating temporary passwd file for container %s", c.ID())
}
@@ -1278,9 +1286,16 @@ func (c *Container) makeBindMounts() error {
// If it already exists, delete so we can recreate
delete(c.state.BindMounts, "/etc/passwd")
}
- logrus.Debugf("adding entry to /etc/passwd for non existent default user")
c.state.BindMounts["/etc/passwd"] = newPasswd
}
+ if newGroup != "" {
+ // Make /etc/group
+ if _, ok := c.state.BindMounts["/etc/group"]; ok {
+ // If it already exists, delete so we can recreate
+ delete(c.state.BindMounts, "/etc/group")
+ }
+ c.state.BindMounts["/etc/group"] = newGroup
+ }
// Make /etc/hostname
// This should never change, so no need to recreate if it exists
@@ -1499,23 +1514,171 @@ func (c *Container) getHosts() string {
return hosts
}
+// generateGroupEntry generates an entry or entries into /etc/group as
+// required by container configuration.
+// Generally speaking, we will make an entry under two circumstances:
+// 1. The container is started as a specific user:group, and that group is both
+// numeric, and does not already exist in /etc/group.
+// 2. It is requested that Libpod add the group that launched Podman to
+// /etc/group via AddCurrentUserPasswdEntry (though this does not trigger if
+// the group in question already exists in /etc/group).
+// Returns group entry (as a string that can be appended to /etc/group) and any
+// error that occurred.
+func (c *Container) generateGroupEntry() (string, error) {
+ groupString := ""
+
+ // Things we *can't* handle: adding the user we added in
+ // generatePasswdEntry to any *existing* groups.
+ addedGID := 0
+ if c.config.AddCurrentUserPasswdEntry {
+ entry, gid, err := c.generateCurrentUserGroupEntry()
+ if err != nil {
+ return "", err
+ }
+ groupString += entry
+ addedGID = gid
+ }
+ if c.config.User != "" {
+ entry, _, err := c.generateUserGroupEntry(addedGID)
+ if err != nil {
+ return "", err
+ }
+ groupString += entry
+ }
+
+ return groupString, nil
+}
+
+// Make an entry in /etc/group for the group of the user running podman iff we
+// are rootless.
+func (c *Container) generateCurrentUserGroupEntry() (string, int, error) {
+ gid := rootless.GetRootlessGID()
+ if gid == 0 {
+ return "", 0, nil
+ }
+
+ g, err := user.LookupGroupId(strconv.Itoa(gid))
+ if err != nil {
+ return "", 0, errors.Wrapf(err, "failed to get current group")
+ }
+
+ // Lookup group name to see if it exists in the image.
+ _, err = lookup.GetGroup(c.state.Mountpoint, g.Name)
+ if err != runcuser.ErrNoGroupEntries {
+ return "", 0, err
+ }
+
+ // Lookup GID to see if it exists in the image.
+ _, err = lookup.GetGroup(c.state.Mountpoint, g.Gid)
+ if err != runcuser.ErrNoGroupEntries {
+ return "", 0, err
+ }
+
+ // We need to get the username of the rootless user so we can add it to
+ // the group.
+ username := ""
+ uid := rootless.GetRootlessUID()
+ if uid != 0 {
+ u, err := user.LookupId(strconv.Itoa(uid))
+ if err != nil {
+ return "", 0, errors.Wrapf(err, "failed to get current user to make group entry")
+ }
+ username = u.Username
+ }
+
+ // Make the entry.
+ return fmt.Sprintf("%s:x:%s:%s\n", g.Name, g.Gid, username), gid, nil
+}
+
+// Make an entry in /etc/group for the group the container was specified to run
+// as.
+func (c *Container) generateUserGroupEntry(addedGID int) (string, int, error) {
+ if c.config.User == "" {
+ return "", 0, nil
+ }
+
+ splitUser := strings.SplitN(c.config.User, ":", 2)
+ group := splitUser[0]
+ if len(splitUser) > 1 {
+ group = splitUser[1]
+ }
+
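+	// Only numeric groups get an entry generated; a named group is expected
+	// to already exist in the image's /etc/group.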
+ gid, err := strconv.ParseUint(group, 10, 32)
+ if err != nil {
+ return "", 0, nil
+ }
+
+ if addedGID != 0 && addedGID == int(gid) {
+ return "", 0, nil
+ }
+
+ // Check if the group already exists
+ _, err = lookup.GetGroup(c.state.Mountpoint, group)
+ if err != runcuser.ErrNoGroupEntries {
+ return "", 0, err
+ }
+
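+	// e.g. a User of "123:456" yields the entry "456:x:456:123\n": a group
+	// named after the GID whose only member is the numeric user.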
+ return fmt.Sprintf("%d:x:%d:%s\n", gid, gid, splitUser[0]), int(gid), nil
+}
+
+// generatePasswdEntry generates an entry or entries into /etc/passwd as
+// required by container configuration.
+// Generally speaking, we will make an entry under two circumstances:
+// 1. The container is started as a specific user who is not in /etc/passwd.
+// This only triggers if the user is given as a *numeric* ID.
+// 2. It is requested that Libpod add the user that launched Podman to
+// /etc/passwd via AddCurrentUserPasswdEntry (though this does not trigger if
+// the user in question already exists in /etc/passwd) or the UID to be added
+// is 0).
+// Returns password entry (as a string that can be appended to /etc/passwd) and
+// any error that occurred.
+func (c *Container) generatePasswdEntry() (string, error) {
+ passwdString := ""
+
+ addedUID := 0
+ if c.config.AddCurrentUserPasswdEntry {
+ entry, uid, _, err := c.generateCurrentUserPasswdEntry()
+ if err != nil {
+ return "", err
+ }
+ passwdString += entry
+ addedUID = uid
+ }
+ if c.config.User != "" {
+ entry, _, _, err := c.generateUserPasswdEntry(addedUID)
+ if err != nil {
+ return "", err
+ }
+ passwdString += entry
+ }
+
+ return passwdString, nil
+}
+
// generateCurrentUserPasswdEntry generates an /etc/passwd entry for the user
-// running the container engine
-func (c *Container) generateCurrentUserPasswdEntry() (string, error) {
+// running the container engine.
+// Returns a passwd entry for the user, and the UID and GID of the added entry.
+func (c *Container) generateCurrentUserPasswdEntry() (string, int, int, error) {
uid := rootless.GetRootlessUID()
if uid == 0 {
- return "", nil
+ return "", 0, 0, nil
}
- u, err := user.LookupId(strconv.Itoa(rootless.GetRootlessUID()))
+ u, err := user.LookupId(strconv.Itoa(uid))
if err != nil {
- return "", errors.Wrapf(err, "failed to get current user")
+ return "", 0, 0, errors.Wrapf(err, "failed to get current user")
}
// Lookup the user to see if it exists in the container image.
_, err = lookup.GetUser(c.state.Mountpoint, u.Username)
- if err != User.ErrNoPasswdEntries {
- return "", err
+ if err != runcuser.ErrNoPasswdEntries {
+ return "", 0, 0, err
+ }
+
+ // Lookup the UID to see if it exists in the container image.
+ _, err = lookup.GetUser(c.state.Mountpoint, u.Uid)
+ if err != runcuser.ErrNoPasswdEntries {
+ return "", 0, 0, err
}
// If the user's actual home directory exists, or was mounted in - use
@@ -1525,18 +1688,22 @@ func (c *Container) generateCurrentUserPasswdEntry() (string, error) {
homeDir = u.HomeDir
}
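+	// The password field is "*" (no password set), so a generated line looks
+	// like "jdoe:*:1000:1000:jdoe:/home/jdoe:/bin/sh" (illustrative values).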
- return fmt.Sprintf("%s:x:%s:%s:%s:%s:/bin/sh\n", u.Username, u.Uid, u.Gid, u.Username, homeDir), nil
+ return fmt.Sprintf("%s:*:%s:%s:%s:%s:/bin/sh\n", u.Username, u.Uid, u.Gid, u.Username, homeDir), uid, rootless.GetRootlessGID(), nil
}
// generateUserPasswdEntry generates an /etc/passwd entry for the container user
// to run in the container.
-func (c *Container) generateUserPasswdEntry() (string, error) {
+// The UID and GID of the added entry will also be returned.
+// Accepts one argument, that being any UID that has already been added to the
+// passwd file by other functions; if it matches the UID we were given, we don't
+// need to do anything.
+func (c *Container) generateUserPasswdEntry(addedUID int) (string, int, int, error) {
var (
groupspec string
gid int
)
if c.config.User == "" {
- return "", nil
+ return "", 0, 0, nil
}
splitSpec := strings.SplitN(c.config.User, ":", 2)
userspec := splitSpec[0]
@@ -1546,13 +1713,17 @@ func (c *Container) generateUserPasswdEntry() (string, error) {
// If a non numeric User, then don't generate passwd
uid, err := strconv.ParseUint(userspec, 10, 32)
if err != nil {
- return "", nil
+ return "", 0, 0, nil
+ }
+
+ if addedUID != 0 && int(uid) == addedUID {
+ return "", 0, 0, nil
}
// Lookup the user to see if it exists in the container image
_, err = lookup.GetUser(c.state.Mountpoint, userspec)
- if err != User.ErrNoPasswdEntries {
- return "", err
+ if err != runcuser.ErrNoPasswdEntries {
+ return "", 0, 0, err
}
if groupspec != "" {
@@ -1562,96 +1733,180 @@ func (c *Container) generateUserPasswdEntry() (string, error) {
} else {
group, err := lookup.GetGroup(c.state.Mountpoint, groupspec)
if err != nil {
- return "", errors.Wrapf(err, "unable to get gid %s from group file", groupspec)
+ return "", 0, 0, errors.Wrapf(err, "unable to get gid %s from group file", groupspec)
}
gid = group.Gid
}
}
- return fmt.Sprintf("%d:x:%d:%d:container user:%s:/bin/sh\n", uid, uid, gid, c.WorkingDir()), nil
+ return fmt.Sprintf("%d:*:%d:%d:container user:%s:/bin/sh\n", uid, uid, gid, c.WorkingDir()), int(uid), gid, nil
}
-// generatePasswd generates a container specific passwd file,
-// iff g.config.User is a number
-func (c *Container) generatePasswd() (string, error) {
+// generatePasswdAndGroup generates container-specific passwd and group files
+// iff c.config.User is a number or we are configured to make a passwd entry for
+// the current user.
+// Returns path to file to mount at /etc/passwd, path to file to mount at
+// /etc/group, and any error that occurred. If no passwd/group file is
+// required, the empty string will be returned for that path (this may occur
+// even if no error happened).
+// This may modify the mounted container's /etc/passwd and /etc/group instead of
+// making copies to bind-mount in, so we don't break useradd (it wants to make a
+// copy of /etc/passwd and rename the copy to /etc/passwd, which is impossible
+// with a bind mount). This is done when the container is *not* read-only; in
+// that case, the file is edited in place and the function returns ("", "", nil).
+func (c *Container) generatePasswdAndGroup() (string, string, error) {
if !c.config.AddCurrentUserPasswdEntry && c.config.User == "" {
- return "", nil
+ return "", "", nil
}
+
+ needPasswd := true
+ needGroup := true
+
+	// First, check if there's a mount at /etc/passwd or /etc/group; we don't
+ // want to interfere with user mounts.
if MountExists(c.config.Spec.Mounts, "/etc/passwd") {
- return "", nil
+ needPasswd = false
}
- // Re-use passwd if possible
- passwdPath := filepath.Join(c.config.StaticDir, "passwd")
- if _, err := os.Stat(passwdPath); err == nil {
- return passwdPath, nil
+ if MountExists(c.config.Spec.Mounts, "/etc/group") {
+ needGroup = false
}
- // Check if container has a /etc/passwd - if it doesn't do nothing.
- passwdPath, err := securejoin.SecureJoin(c.state.Mountpoint, "/etc/passwd")
- if err != nil {
- return "", errors.Wrapf(err, "error creating path to container %s /etc/passwd", c.ID())
+
+	// Next, check if we already made the files. If we did, we don't need to
+ // do anything more.
+ if needPasswd {
+ passwdPath := filepath.Join(c.config.StaticDir, "passwd")
+ if _, err := os.Stat(passwdPath); err == nil {
+ needPasswd = false
+ }
}
- if _, err := os.Stat(passwdPath); err != nil {
- if os.IsNotExist(err) {
- return "", nil
+ if needGroup {
+ groupPath := filepath.Join(c.config.StaticDir, "group")
+ if _, err := os.Stat(groupPath); err == nil {
+ needGroup = false
}
- return "", errors.Wrapf(err, "unable to access container %s /etc/passwd", c.ID())
}
- pwd := ""
- if c.config.User != "" {
- entry, err := c.generateUserPasswdEntry()
+
+ // Next, check if the container even has a /etc/passwd or /etc/group.
+	// If it doesn't, we don't want to create them ourselves.
+ if needPasswd {
+ exists, err := c.checkFileExistsInRootfs("/etc/passwd")
if err != nil {
- return "", err
+ return "", "", err
}
- pwd += entry
+ needPasswd = exists
}
- if c.config.AddCurrentUserPasswdEntry {
- entry, err := c.generateCurrentUserPasswdEntry()
+ if needGroup {
+ exists, err := c.checkFileExistsInRootfs("/etc/group")
if err != nil {
- return "", err
+ return "", "", err
}
- pwd += entry
+ needGroup = exists
}
- if pwd == "" {
- return "", nil
+
+ // If we don't need a /etc/passwd or /etc/group at this point we can
+ // just return.
+ if !needPasswd && !needGroup {
+ return "", "", nil
}
- // If we are *not* read-only - edit /etc/passwd in the container.
- // This is *gross* (shows up in changes to the container, will be
- // committed to images based on the container) but it actually allows us
- // to add users to the container (a bind mount breaks useradd).
- // We should never get here twice, because generateUserPasswdEntry will
- // not return anything if the user already exists in /etc/passwd.
- if !c.IsReadOnly() {
- containerPasswd, err := securejoin.SecureJoin(c.state.Mountpoint, "/etc/passwd")
+ passwdPath := ""
+ groupPath := ""
+
+ ro := c.IsReadOnly()
+
+ if needPasswd {
+ passwdEntry, err := c.generatePasswdEntry()
if err != nil {
- return "", errors.Wrapf(err, "error looking up location of container %s /etc/passwd", c.ID())
+ return "", "", err
}
- f, err := os.OpenFile(containerPasswd, os.O_APPEND|os.O_WRONLY, 0600)
+ needsWrite := passwdEntry != ""
+ switch {
+ case ro && needsWrite:
+ logrus.Debugf("Making /etc/passwd for container %s", c.ID())
+ originPasswdFile, err := securejoin.SecureJoin(c.state.Mountpoint, "/etc/passwd")
+ if err != nil {
+ return "", "", errors.Wrapf(err, "error creating path to container %s /etc/passwd", c.ID())
+ }
+ orig, err := ioutil.ReadFile(originPasswdFile)
+ if err != nil && !os.IsNotExist(err) {
+ return "", "", errors.Wrapf(err, "unable to read passwd file %s", originPasswdFile)
+ }
+ passwdFile, err := c.writeStringToStaticDir("passwd", string(orig)+passwdEntry)
+ if err != nil {
+ return "", "", errors.Wrapf(err, "failed to create temporary passwd file")
+ }
+ if err := os.Chmod(passwdFile, 0644); err != nil {
+ return "", "", err
+ }
+ passwdPath = passwdFile
+ case !ro && needsWrite:
+ logrus.Debugf("Modifying container %s /etc/passwd", c.ID())
+ containerPasswd, err := securejoin.SecureJoin(c.state.Mountpoint, "/etc/passwd")
+ if err != nil {
+ return "", "", errors.Wrapf(err, "error looking up location of container %s /etc/passwd", c.ID())
+ }
+
+ f, err := os.OpenFile(containerPasswd, os.O_APPEND|os.O_WRONLY, 0600)
+ if err != nil {
+ return "", "", errors.Wrapf(err, "error opening container %s /etc/passwd", c.ID())
+ }
+ defer f.Close()
+
+ if _, err := f.WriteString(passwdEntry); err != nil {
+ return "", "", errors.Wrapf(err, "unable to append to container %s /etc/passwd", c.ID())
+ }
+ default:
+ logrus.Debugf("Not modifying container %s /etc/passwd", c.ID())
+ }
+ }
+ if needGroup {
+ groupEntry, err := c.generateGroupEntry()
if err != nil {
- return "", errors.Wrapf(err, "error opening container %s /etc/passwd", c.ID())
+ return "", "", err
}
- defer f.Close()
- if _, err := f.WriteString(pwd); err != nil {
- return "", errors.Wrapf(err, "unable to append to container %s /etc/passwd", c.ID())
- }
+ needsWrite := groupEntry != ""
+ switch {
+ case ro && needsWrite:
+ logrus.Debugf("Making /etc/group for container %s", c.ID())
+ originGroupFile, err := securejoin.SecureJoin(c.state.Mountpoint, "/etc/group")
+ if err != nil {
+ return "", "", errors.Wrapf(err, "error creating path to container %s /etc/group", c.ID())
+ }
+ orig, err := ioutil.ReadFile(originGroupFile)
+ if err != nil && !os.IsNotExist(err) {
+ return "", "", errors.Wrapf(err, "unable to read group file %s", originGroupFile)
+ }
+ groupFile, err := c.writeStringToStaticDir("group", string(orig)+groupEntry)
+ if err != nil {
+ return "", "", errors.Wrapf(err, "failed to create temporary group file")
+ }
+ if err := os.Chmod(groupFile, 0644); err != nil {
+ return "", "", err
+ }
+ groupPath = groupFile
+ case !ro && needsWrite:
+ logrus.Debugf("Modifying container %s /etc/group", c.ID())
+ containerGroup, err := securejoin.SecureJoin(c.state.Mountpoint, "/etc/group")
+ if err != nil {
+ return "", "", errors.Wrapf(err, "error looking up location of container %s /etc/group", c.ID())
+ }
- return "", nil
- }
+ f, err := os.OpenFile(containerGroup, os.O_APPEND|os.O_WRONLY, 0600)
+ if err != nil {
+ return "", "", errors.Wrapf(err, "error opening container %s /etc/group", c.ID())
+ }
+ defer f.Close()
- originPasswdFile := filepath.Join(c.state.Mountpoint, "/etc/passwd")
- orig, err := ioutil.ReadFile(originPasswdFile)
- if err != nil && !os.IsNotExist(err) {
- return "", errors.Wrapf(err, "unable to read passwd file %s", originPasswdFile)
- }
- passwdFile, err := c.writeStringToStaticDir("passwd", string(orig)+pwd)
- if err != nil {
- return "", errors.Wrapf(err, "failed to create temporary passwd file")
- }
- if err := os.Chmod(passwdFile, 0644); err != nil {
- return "", err
+ if _, err := f.WriteString(groupEntry); err != nil {
+ return "", "", errors.Wrapf(err, "unable to append to container %s /etc/group", c.ID())
+ }
+ default:
+ logrus.Debugf("Not modifying container %s /etc/group", c.ID())
+ }
}
- return passwdFile, nil
+
+ return passwdPath, groupPath, nil
}
func (c *Container) copyOwnerAndPerms(source, dest string) error {
@@ -1743,3 +1998,23 @@ func (c *Container) copyTimezoneFile(zonePath string) (string, error) {
func (c *Container) cleanupOverlayMounts() error {
return overlay.CleanupContent(c.config.StaticDir)
}
+
+// Check if a file exists at the given path in the container's root filesystem.
+// Container must already be mounted for this to be used.
+func (c *Container) checkFileExistsInRootfs(file string) (bool, error) {
+ checkPath, err := securejoin.SecureJoin(c.state.Mountpoint, file)
+ if err != nil {
+ return false, errors.Wrapf(err, "cannot create path to container %s file %q", c.ID(), file)
+ }
+ stat, err := os.Stat(checkPath)
+ if err != nil {
+ if os.IsNotExist(err) {
+ return false, nil
+ }
+ return false, errors.Wrapf(err, "error accessing container %s file %q", c.ID(), file)
+ }
+ if stat.IsDir() {
+ return false, nil
+ }
+ return true, nil
+}
diff --git a/libpod/container_internal_linux_test.go b/libpod/container_internal_linux_test.go
index 41c22fb45..1465ffbea 100644
--- a/libpod/container_internal_linux_test.go
+++ b/libpod/container_internal_linux_test.go
@@ -29,16 +29,42 @@ func TestGenerateUserPasswdEntry(t *testing.T) {
Mountpoint: "/does/not/exist/tmp/",
},
}
- user, err := c.generateUserPasswdEntry()
+ user, _, _, err := c.generateUserPasswdEntry(0)
if err != nil {
t.Fatal(err)
}
- assert.Equal(t, user, "123:x:123:456:container user:/:/bin/sh\n")
+ assert.Equal(t, user, "123:*:123:456:container user:/:/bin/sh\n")
c.config.User = "567"
- user, err = c.generateUserPasswdEntry()
+ user, _, _, err = c.generateUserPasswdEntry(0)
if err != nil {
t.Fatal(err)
}
- assert.Equal(t, user, "567:x:567:0:container user:/:/bin/sh\n")
+ assert.Equal(t, user, "567:*:567:0:container user:/:/bin/sh\n")
+}
+
+func TestGenerateUserGroupEntry(t *testing.T) {
+ c := Container{
+ config: &ContainerConfig{
+ Spec: &spec.Spec{},
+ ContainerSecurityConfig: ContainerSecurityConfig{
+ User: "123:456",
+ },
+ },
+ state: &ContainerState{
+ Mountpoint: "/does/not/exist/tmp/",
+ },
+ }
+ group, _, err := c.generateUserGroupEntry(0)
+ if err != nil {
+ t.Fatal(err)
+ }
+ assert.Equal(t, group, "456:x:456:123\n")
+
+ c.config.User = "567"
+ group, _, err = c.generateUserGroupEntry(0)
+ if err != nil {
+ t.Fatal(err)
+ }
+ assert.Equal(t, group, "567:x:567:567\n")
}
diff --git a/libpod/container_validate.go b/libpod/container_validate.go
index d657e3549..b78168cd1 100644
--- a/libpod/container_validate.go
+++ b/libpod/container_validate.go
@@ -2,7 +2,6 @@ package libpod
import (
"github.com/containers/podman/v2/libpod/define"
- "github.com/containers/podman/v2/pkg/rootless"
spec "github.com/opencontainers/runtime-spec/specs-go"
"github.com/pkg/errors"
)
@@ -68,16 +67,6 @@ func (c *Container) validate() error {
}
}
- // Rootless has some requirements, compared to networks.
- if rootless.IsRootless() {
- if len(c.config.Networks) > 0 {
- return errors.Wrapf(define.ErrInvalidArg, "cannot join CNI networks if running rootless")
- }
-
- // TODO: Should we make sure network mode is set to Slirp if set
- // at all?
- }
-
 	// Can only set static IP or MAC if creating a network namespace.
if !c.config.CreateNetNS && (c.config.StaticIP != nil || c.config.StaticMAC != nil) {
return errors.Wrapf(define.ErrInvalidArg, "cannot set static IP or MAC address if not creating a network namespace")
diff --git a/libpod/define/errors.go b/libpod/define/errors.go
index f80b1d6e3..7714ebbf0 100644
--- a/libpod/define/errors.go
+++ b/libpod/define/errors.go
@@ -161,4 +161,8 @@ var (
// ErrNetworkOnPodContainer indicates the user wishes to alter network attributes on a container
// in a pod. This cannot be done as the infra container has all the network information
ErrNetworkOnPodContainer = errors.New("network cannot be configured when it is shared with a pod")
+
+ // ErrStoreNotInitialized indicates that the container storage was never
+ // initialized.
+ ErrStoreNotInitialized = errors.New("the container storage was never initialized")
)
diff --git a/libpod/image/image.go b/libpod/image/image.go
index dee2ce0ee..850a48eae 100644
--- a/libpod/image/image.go
+++ b/libpod/image/image.go
@@ -17,6 +17,7 @@ import (
"github.com/containers/common/pkg/retry"
cp "github.com/containers/image/v5/copy"
"github.com/containers/image/v5/directory"
+ "github.com/containers/image/v5/docker/archive"
dockerarchive "github.com/containers/image/v5/docker/archive"
"github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/image"
@@ -173,13 +174,182 @@ func (ir *Runtime) New(ctx context.Context, name, signaturePolicyPath, authfile
return newImage, nil
}
+// SaveImages stores one or more images in a multi-image archive.
+// Note that only the `docker-archive` format supports storing multiple
+// images.
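+//
+// A minimal usage sketch (illustrative only; the image references and the
+// output path are hypothetical):
+//
+//    err := ir.SaveImages(ctx, []string{"alpine:latest", "busybox:latest"}, DockerArchive, "/tmp/images.tar", true)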
+func (ir *Runtime) SaveImages(ctx context.Context, namesOrIDs []string, format string, outputFile string, quiet bool) (finalErr error) {
+ if format != DockerArchive {
+ return errors.Errorf("multi-image archives are only supported in in the %q format", DockerArchive)
+ }
+
+ sys := GetSystemContext("", "", false)
+
+ archWriter, err := archive.NewWriter(sys, outputFile)
+ if err != nil {
+ return err
+ }
+ defer func() {
+ err := archWriter.Close()
+ if err == nil {
+ return
+ }
+ if finalErr == nil {
+ finalErr = err
+ return
+ }
+ finalErr = errors.Wrap(finalErr, err.Error())
+ }()
+
+ // Decide whether c/image's progress bars should use stderr or stdout.
+ // Use stderr in case we need to be quiet or if the output is set to
+ // stdout. If the output is set to stdout, any log message there would
+ // corrupt the tarfile.
+ writer := os.Stdout
+ if quiet {
+ writer = os.Stderr
+ }
+
+ // extend an image with additional tags
+ type imageData struct {
+ *Image
+ tags []reference.NamedTagged
+ }
+
+ // Look up the images (and their tags) in the local storage.
+ imageMap := make(map[string]*imageData) // to group tags for an image
+ imageQueue := []string{} // to preserve relative image order
+ for _, nameOrID := range namesOrIDs {
+ // Look up the name or ID in the local image storage.
+ localImage, err := ir.NewFromLocal(nameOrID)
+ if err != nil {
+ return err
+ }
+ id := localImage.ID()
+
+ iData, exists := imageMap[id]
+ if !exists {
+ imageQueue = append(imageQueue, id)
+ iData = &imageData{Image: localImage}
+ imageMap[id] = iData
+ }
+
+ // Unless we referred to an ID, add the input as a tag.
+ if !strings.HasPrefix(id, nameOrID) {
+ tag, err := NormalizedTag(nameOrID)
+ if err != nil {
+ return err
+ }
+ refTagged, isTagged := tag.(reference.NamedTagged)
+ if isTagged {
+ iData.tags = append(iData.tags, refTagged)
+ }
+ }
+ }
+
+ policyContext, err := getPolicyContext(sys)
+ if err != nil {
+ return err
+ }
+ defer func() {
+ if err := policyContext.Destroy(); err != nil {
+ logrus.Errorf("failed to destroy policy context: %q", err)
+ }
+ }()
+
+ // Now copy the images one-by-one.
+ for _, id := range imageQueue {
+ dest, err := archWriter.NewReference(nil)
+ if err != nil {
+ return err
+ }
+
+ img := imageMap[id]
+ copyOptions := getCopyOptions(sys, writer, nil, nil, SigningOptions{}, "", img.tags)
+ copyOptions.DestinationCtx.SystemRegistriesConfPath = registries.SystemRegistriesConfPath()
+
+ // For copying, we need a source reference that we can create
+ // from the image.
+ src, err := is.Transport.NewStoreReference(img.imageruntime.store, nil, id)
+ if err != nil {
+ return errors.Wrapf(err, "error getting source imageReference for %q", img.InputName)
+ }
+ _, err = cp.Image(ctx, policyContext, dest, src, copyOptions)
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// LoadAllImagesFromDockerArchive loads all images from the docker archive that
+// fileName points to.
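+//
+// A minimal usage sketch (illustrative only; the archive path is
+// hypothetical):
+//
+//    images, err := ir.LoadAllImagesFromDockerArchive(ctx, "/tmp/images.tar", "", os.Stderr)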
+func (ir *Runtime) LoadAllImagesFromDockerArchive(ctx context.Context, fileName string, signaturePolicyPath string, writer io.Writer) ([]*Image, error) {
+ if signaturePolicyPath == "" {
+ signaturePolicyPath = ir.SignaturePolicyPath
+ }
+
+ sc := GetSystemContext(signaturePolicyPath, "", false)
+ reader, err := archive.NewReader(sc, fileName)
+ if err != nil {
+ return nil, err
+ }
+
+ defer func() {
+ if err := reader.Close(); err != nil {
+ logrus.Error(err.Error())
+ }
+ }()
+
+ refLists, err := reader.List()
+ if err != nil {
+ return nil, err
+ }
+
+ refPairs := []pullRefPair{}
+ for _, refList := range refLists {
+ for _, ref := range refList {
+ pairs, err := ir.getPullRefPairsFromDockerArchiveReference(ctx, reader, ref, sc)
+ if err != nil {
+ return nil, err
+ }
+ refPairs = append(refPairs, pairs...)
+ }
+ }
+
+ goal := pullGoal{
+ pullAllPairs: true,
+ usedSearchRegistries: false,
+ refPairs: refPairs,
+ searchedRegistries: nil,
+ }
+
+ defer goal.cleanUp()
+ imageNames, err := ir.doPullImage(ctx, sc, goal, writer, SigningOptions{}, &DockerRegistryOptions{}, &retry.RetryOptions{}, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ newImages := make([]*Image, 0, len(imageNames))
+ for _, name := range imageNames {
+ newImage, err := ir.NewFromLocal(name)
+ if err != nil {
+ return nil, errors.Wrapf(err, "error retrieving local image after pulling %s", name)
+ }
+ newImages = append(newImages, newImage)
+ }
+ ir.newImageEvent(events.LoadFromArchive, "")
+ return newImages, nil
+}
+
// LoadFromArchiveReference creates a new image object for images pulled from a tar archive and the like (podman load)
// This function is needed because it is possible for a tar archive to have multiple tags for one image
func (ir *Runtime) LoadFromArchiveReference(ctx context.Context, srcRef types.ImageReference, signaturePolicyPath string, writer io.Writer) ([]*Image, error) {
if signaturePolicyPath == "" {
signaturePolicyPath = ir.SignaturePolicyPath
}
- imageNames, err := ir.pullImageFromReference(ctx, srcRef, writer, "", signaturePolicyPath, SigningOptions{}, &DockerRegistryOptions{}, &retry.RetryOptions{MaxRetry: maxRetry})
+
+ imageNames, err := ir.pullImageFromReference(ctx, srcRef, writer, "", signaturePolicyPath, SigningOptions{}, &DockerRegistryOptions{}, &retry.RetryOptions{})
if err != nil {
return nil, errors.Wrapf(err, "unable to pull %s", transports.ImageName(srcRef))
}
@@ -466,6 +636,14 @@ func (ir *Runtime) getImage(image string) (*storage.Image, error) {
return img, nil
}
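+// ImageNames returns all the names the image with the given ID is known by
+// in local storage.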
+func (ir *Runtime) ImageNames(id string) ([]string, error) {
+ myImage, err := ir.getImage(id)
+ if err != nil {
+ return nil, errors.Wrapf(err, "error getting image %s ", id)
+ }
+ return myImage.Names, nil
+}
+
// GetImages retrieves all images present in storage
func (ir *Runtime) GetImages() ([]*Image, error) {
return ir.getImages(false)
@@ -1247,11 +1425,14 @@ func areParentAndChild(parent, child *imgspecv1.Image) bool {
// candidate parent's diff IDs, which together would have
// controlled which layers were used
- // issue #7444 describes a panic where the length of child.RootFS.DiffIDs
- // is checked but child is nil. Adding a simple band-aid approach to prevent
- // the problem until the origin of the problem can be worked out in the issue
- // itself.
- if child == nil || len(parent.RootFS.DiffIDs) > len(child.RootFS.DiffIDs) {
+ // Both child and parent may be nil when the storage is left in an
+ // incoherent state. Issue #7444 describes such a case when a build
+ // has been killed.
+ if child == nil || parent == nil {
+ return false
+ }
+
+ if len(parent.RootFS.DiffIDs) > len(child.RootFS.DiffIDs) {
return false
}
childUsesCandidateDiffs := true
diff --git a/libpod/image/layer_tree.go b/libpod/image/layer_tree.go
index 3699655fd..18101575e 100644
--- a/libpod/image/layer_tree.go
+++ b/libpod/image/layer_tree.go
@@ -32,7 +32,9 @@ func (t *layerTree) toOCI(ctx context.Context, i *Image) (*ociv1.Image, error) {
oci, exists := t.ociCache[i.ID()]
if !exists {
oci, err = i.ociv1Image(ctx)
- t.ociCache[i.ID()] = oci
+ if err == nil {
+ t.ociCache[i.ID()] = oci
+ }
}
return oci, err
}
diff --git a/libpod/image/prune.go b/libpod/image/prune.go
index 5a9ca5d8e..fcc65fb03 100644
--- a/libpod/image/prune.go
+++ b/libpod/image/prune.go
@@ -137,7 +137,7 @@ func (ir *Runtime) PruneImages(ctx context.Context, all bool, filter []string) (
}
if err := p.Remove(ctx, true); err != nil {
if errors.Cause(err) == storage.ErrImageUsedByContainer {
- logrus.Warnf("Failed to prune image %s as it is in use: %v", p.ID(), err)
+ logrus.Warnf("Failed to prune image %s as it is in use: %v.\nA container associated with containers/storage i.e. Buildah, CRI-O, etc., maybe associated with this image.\nUsing the rmi command with the --force option will remove the container and image, but may cause failures for other dependent systems.", p.ID(), err)
continue
}
return nil, errors.Wrap(err, "failed to prune image")
diff --git a/libpod/image/pull.go b/libpod/image/pull.go
index bdcda4016..94d6af4c2 100644
--- a/libpod/image/pull.go
+++ b/libpod/image/pull.go
@@ -11,8 +11,8 @@ import (
cp "github.com/containers/image/v5/copy"
"github.com/containers/image/v5/directory"
"github.com/containers/image/v5/docker"
+ "github.com/containers/image/v5/docker/archive"
dockerarchive "github.com/containers/image/v5/docker/archive"
- "github.com/containers/image/v5/docker/tarfile"
ociarchive "github.com/containers/image/v5/oci/archive"
oci "github.com/containers/image/v5/oci/layout"
is "github.com/containers/image/v5/storage"
@@ -61,12 +61,26 @@ type pullRefPair struct {
dstRef types.ImageReference
}
+// cleanUpFunc is a function prototype for clean-up functions.
+type cleanUpFunc func() error
+
// pullGoal represents the prepared image references and decided behavior to be executed by imagePull
type pullGoal struct {
refPairs []pullRefPair
- pullAllPairs bool // Pull all refPairs instead of stopping on first success.
- usedSearchRegistries bool // refPairs construction has depended on registries.GetRegistries()
- searchedRegistries []string // The list of search registries used; set only if usedSearchRegistries
+ pullAllPairs bool // Pull all refPairs instead of stopping on first success.
+ usedSearchRegistries bool // refPairs construction has depended on registries.GetRegistries()
+ searchedRegistries []string // The list of search registries used; set only if usedSearchRegistries
+ cleanUpFuncs []cleanUpFunc // Mainly used to close long-lived objects (e.g., an archive.Reader)
+}
+
+// cleanUp invokes all cleanUpFuncs. Certain resources may not be available
+// anymore. Errors are logged.
+func (p *pullGoal) cleanUp() {
+ for _, f := range p.cleanUpFuncs {
+ if err := f(); err != nil {
+ logrus.Error(err.Error())
+ }
+ }
}
// singlePullRefPairGoal returns a no-frills pull goal for the specified reference pair.
@@ -114,7 +128,49 @@ func (ir *Runtime) getSinglePullRefPairGoal(srcRef types.ImageReference, destNam
return singlePullRefPairGoal(rp), nil
}
+// getPullRefPairsFromDockerArchiveReference returns a slice of pullRefPairs
+// for the specified docker reference and the corresponding archive.Reader.
+func (ir *Runtime) getPullRefPairsFromDockerArchiveReference(ctx context.Context, reader *archive.Reader, ref types.ImageReference, sc *types.SystemContext) ([]pullRefPair, error) {
+ destNames, err := reader.ManifestTagsForReference(ref)
+ if err != nil {
+ return nil, err
+ }
+
+ if len(destNames) == 0 {
+ destName, err := getImageDigest(ctx, ref, sc)
+ if err != nil {
+ return nil, err
+ }
+ destNames = append(destNames, destName)
+ } else {
+ for i := range destNames {
+ ref, err := NormalizedTag(destNames[i])
+ if err != nil {
+ return nil, err
+ }
+ destNames[i] = ref.String()
+ }
+ }
+
+ refPairs := []pullRefPair{}
+ for _, destName := range destNames {
+ destRef, err := is.Transport.ParseStoreReference(ir.store, destName)
+ if err != nil {
+ return nil, errors.Wrapf(err, "error parsing dest reference name %#v", destName)
+ }
+ pair := pullRefPair{
+ image: destName,
+ srcRef: ref,
+ dstRef: destRef,
+ }
+ refPairs = append(refPairs, pair)
+ }
+
+ return refPairs, nil
+}
+
// pullGoalFromImageReference returns a pull goal for a single ImageReference, depending on the used transport.
+// Note that callers are responsible for invoking (*pullGoal).cleanUp() to clean up possibly open resources.
func (ir *Runtime) pullGoalFromImageReference(ctx context.Context, srcRef types.ImageReference, imgName string, sc *types.SystemContext) (*pullGoal, error) {
span, _ := opentracing.StartSpanFromContext(ctx, "pullGoalFromImageReference")
defer span.Finish()
@@ -122,57 +178,26 @@ func (ir *Runtime) pullGoalFromImageReference(ctx context.Context, srcRef types.
// supports pulling from docker-archive, oci, and registries
switch srcRef.Transport().Name() {
case DockerArchive:
- archivePath := srcRef.StringWithinTransport()
- tarSource, err := tarfile.NewSourceFromFile(archivePath)
+ reader, readerRef, err := archive.NewReaderForReference(sc, srcRef)
if err != nil {
return nil, err
}
- defer tarSource.Close()
- manifest, err := tarSource.LoadTarManifest()
+ pairs, err := ir.getPullRefPairsFromDockerArchiveReference(ctx, reader, readerRef, sc)
if err != nil {
- return nil, errors.Wrapf(err, "error retrieving manifest.json")
- }
- // to pull the first image stored in the tar file
- if len(manifest) == 0 {
- // use the hex of the digest if no manifest is found
- reference, err := getImageDigest(ctx, srcRef, sc)
- if err != nil {
- return nil, err
- }
- return ir.getSinglePullRefPairGoal(srcRef, reference)
- }
-
- if len(manifest[0].RepoTags) == 0 {
- // If the input image has no repotags, we need to feed it a dest anyways
- digest, err := getImageDigest(ctx, srcRef, sc)
- if err != nil {
- return nil, err
+ // No need to defer for a single error path.
+ if err := reader.Close(); err != nil {
+ logrus.Error(err.Error())
}
- return ir.getSinglePullRefPairGoal(srcRef, digest)
+ return nil, err
}
- // Need to load in all the repo tags from the manifest
- res := []pullRefPair{}
- for _, dst := range manifest[0].RepoTags {
- //check if image exists and gives a warning of untagging
- localImage, err := ir.NewFromLocal(dst)
- imageID := strings.TrimSuffix(manifest[0].Config, ".json")
- if err == nil && imageID != localImage.ID() {
- logrus.Errorf("the image %s already exists, renaming the old one with ID %s to empty string", dst, localImage.ID())
- }
-
- pullInfo, err := ir.getPullRefPair(srcRef, dst)
- if err != nil {
- return nil, err
- }
- res = append(res, pullInfo)
- }
return &pullGoal{
- refPairs: res,
pullAllPairs: true,
usedSearchRegistries: false,
+ refPairs: pairs,
searchedRegistries: nil,
+ cleanUpFuncs: []cleanUpFunc{reader.Close},
}, nil
case OCIArchive:
@@ -249,6 +274,7 @@ func (ir *Runtime) pullImageFromHeuristicSource(ctx context.Context, inputName s
return nil, errors.Wrapf(err, "error determining pull goal for image %q", inputName)
}
}
+ defer goal.cleanUp()
return ir.doPullImage(ctx, sc, *goal, writer, signingOptions, dockerOptions, retryOptions, label)
}
@@ -267,6 +293,7 @@ func (ir *Runtime) pullImageFromReference(ctx context.Context, srcRef types.Imag
if err != nil {
return nil, errors.Wrapf(err, "error determining pull goal for image %q", transports.ImageName(srcRef))
}
+ defer goal.cleanUp()
return ir.doPullImage(ctx, sc, *goal, writer, signingOptions, dockerOptions, retryOptions, nil)
}
diff --git a/libpod/image/pull_test.go b/libpod/image/pull_test.go
index 0046cdfef..6cb80e8b5 100644
--- a/libpod/image/pull_test.go
+++ b/libpod/image/pull_test.go
@@ -150,7 +150,7 @@ func TestPullGoalFromImageReference(t *testing.T) {
{ // RepoTags is empty
"docker-archive:testdata/docker-unnamed.tar.xz",
[]expected{{"@ec9293436c2e66da44edb9efb8d41f6b13baf62283ebe846468bc992d76d7951", "@ec9293436c2e66da44edb9efb8d41f6b13baf62283ebe846468bc992d76d7951"}},
- false,
+ true,
},
{ // RepoTags is a [docker.io/library/]name:latest, normalized to the short format.
"docker-archive:testdata/docker-name-only.tar.xz",
@@ -170,11 +170,37 @@ func TestPullGoalFromImageReference(t *testing.T) {
},
true,
},
- { // FIXME: Two images in a single archive - only the "first" one (whichever it is) is returned
- // (and docker-archive: then refuses to read anything when the manifest has more than 1 item)
+ { // Reference image by name in multi-image archive
+ "docker-archive:testdata/docker-two-images.tar.xz:example.com/empty:latest",
+ []expected{
+ {"example.com/empty:latest", "example.com/empty:latest"},
+ },
+ true,
+ },
+ { // Reference image by name in multi-image archive
+ "docker-archive:testdata/docker-two-images.tar.xz:example.com/empty/but:different",
+ []expected{
+ {"example.com/empty/but:different", "example.com/empty/but:different"},
+ },
+ true,
+ },
+ { // Reference image by index in multi-image archive
+ "docker-archive:testdata/docker-two-images.tar.xz:@0",
+ []expected{
+ {"example.com/empty:latest", "example.com/empty:latest"},
+ },
+ true,
+ },
+ { // Reference image by index in multi-image archive
+ "docker-archive:testdata/docker-two-images.tar.xz:@1",
+ []expected{
+ {"example.com/empty/but:different", "example.com/empty/but:different"},
+ },
+ true,
+ },
+ { // Referencing the entire multi-image archive must fail (more than one manifest)
"docker-archive:testdata/docker-two-images.tar.xz",
- []expected{{"example.com/empty:latest", "example.com/empty:latest"}},
- // "example.com/empty/but:different" exists but is ignored
+ []expected{},
true,
},
@@ -248,7 +274,7 @@ func TestPullGoalFromImageReference(t *testing.T) {
for i, e := range c.expected {
testDescription := fmt.Sprintf("%s #%d", c.srcName, i)
assert.Equal(t, e.image, res.refPairs[i].image, testDescription)
- assert.Equal(t, srcRef, res.refPairs[i].srcRef, testDescription)
+ assert.Equal(t, transports.ImageName(srcRef), transports.ImageName(res.refPairs[i].srcRef), testDescription)
assert.Equal(t, e.dstName, storageReferenceWithoutLocation(res.refPairs[i].dstRef), testDescription)
}
assert.Equal(t, c.expectedPullAllPairs, res.pullAllPairs, c.srcName)
diff --git a/libpod/kube.go b/libpod/kube.go
index 5f2c9e0fd..9d5cbe68b 100644
--- a/libpod/kube.go
+++ b/libpod/kube.go
@@ -69,12 +69,20 @@ func (p *Pod) GenerateForKube() (*v1.Pod, []v1.ServicePort, error) {
return nil, servicePorts, err
}
servicePorts = containerPortsToServicePorts(ports)
+
}
pod, err := p.podWithContainers(allContainers, ports)
if err != nil {
return nil, servicePorts, err
}
pod.Spec.HostAliases = extraHost
+
+ if p.SharesPID() {
+ // Unfortunately, Go has no way to take the address of a bool literal, so use a helper variable.
+ b := true
+ pod.Spec.ShareProcessNamespace = &b
+ }
+
return pod, servicePorts, nil
}
@@ -191,7 +199,7 @@ func addContainersAndVolumesToPodObject(containers []v1.Container, volumes []v1.
labels["app"] = removeUnderscores(podName)
om := v12.ObjectMeta{
// The name of the pod is container_name-libpod
- Name: removeUnderscores(podName),
+ Name: podName,
Labels: labels,
// CreationTimestamp seems to be required, so adding it; in doing so, the timestamp
// will reflect time this is run (not container create time) because the conversion
diff --git a/libpod/networking_linux.go b/libpod/networking_linux.go
index 6f266e5d6..c0508ce39 100644
--- a/libpod/networking_linux.go
+++ b/libpod/networking_linux.go
@@ -4,6 +4,7 @@ package libpod
import (
"bytes"
+ "context"
"crypto/rand"
"fmt"
"io"
@@ -208,6 +209,20 @@ func checkSlirpFlags(path string) (*slirpFeatures, error) {
// Configure the network namespace for a rootless container
func (r *Runtime) setupRootlessNetNS(ctr *Container) error {
+ if ctr.config.NetMode.IsSlirp4netns() {
+ return r.setupSlirp4netns(ctr)
+ }
+ if len(ctr.config.Networks) > 0 {
+ // set up port forwarder for CNI-in-slirp4netns
+ netnsPath := ctr.state.NetNS.Path()
+ // TODO: support slirp4netns port forwarder as well
+ return r.setupRootlessPortMappingViaRLK(ctr, netnsPath)
+ }
+ return nil
+}
+
+// setupSlirp4netns can be called in rootful as well as rootless mode
+func (r *Runtime) setupSlirp4netns(ctr *Container) error {
path := r.config.Engine.NetworkCmdPath
if path == "" {
@@ -711,7 +726,7 @@ func (r *Runtime) teardownNetNS(ctr *Container) error {
logrus.Debugf("Tearing down network namespace at %s for container %s", ctr.state.NetNS.Path(), ctr.ID())
- // rootless containers do not use the CNI plugin
+ // rootless containers do not use the CNI plugin directly
if !rootless.IsRootless() && !ctr.config.NetMode.IsSlirp4netns() {
var requestedIP net.IP
if ctr.requestedIP != nil {
@@ -738,6 +753,13 @@ func (r *Runtime) teardownNetNS(ctr *Container) error {
}
}
+ // CNI-in-slirp4netns
+ if rootless.IsRootless() && len(ctr.config.Networks) != 0 {
+ if err := DeallocRootlessCNI(context.Background(), ctr); err != nil {
+ return errors.Wrapf(err, "error tearing down CNI-in-slirp4netns for container %s", ctr.ID())
+ }
+ }
+
// First unmount the namespace
if err := netns.UnmountNS(ctr.state.NetNS); err != nil {
return errors.Wrapf(err, "error unmounting network namespace for container %s", ctr.ID())
diff --git a/libpod/networking_unsupported.go b/libpod/networking_unsupported.go
index dd72a3fd8..76bb01424 100644
--- a/libpod/networking_unsupported.go
+++ b/libpod/networking_unsupported.go
@@ -8,6 +8,10 @@ func (r *Runtime) setupRootlessNetNS(ctr *Container) error {
return define.ErrNotImplemented
}
+func (r *Runtime) setupSlirp4netns(ctr *Container) error {
+ return define.ErrNotImplemented
+}
+
func (r *Runtime) setupNetNS(ctr *Container) error {
return define.ErrNotImplemented
}
diff --git a/libpod/oci_conmon_linux.go b/libpod/oci_conmon_linux.go
index f66835771..bb138ca14 100644
--- a/libpod/oci_conmon_linux.go
+++ b/libpod/oci_conmon_linux.go
@@ -1086,7 +1086,7 @@ func (r *ConmonOCIRuntime) createOCIContainer(ctr *Container, restoreOptions *Co
cmd.ExtraFiles = append(cmd.ExtraFiles, childSyncPipe, childStartPipe)
cmd.ExtraFiles = append(cmd.ExtraFiles, envFiles...)
- if r.reservePorts && !ctr.config.NetMode.IsSlirp4netns() {
+ if r.reservePorts && !rootless.IsRootless() && !ctr.config.NetMode.IsSlirp4netns() {
ports, err := bindPorts(ctr.config.PortMappings)
if err != nil {
return err
@@ -1098,7 +1098,7 @@ func (r *ConmonOCIRuntime) createOCIContainer(ctr *Container, restoreOptions *Co
cmd.ExtraFiles = append(cmd.ExtraFiles, ports...)
}
- if ctr.config.NetMode.IsSlirp4netns() {
+ if ctr.config.NetMode.IsSlirp4netns() || rootless.IsRootless() {
if ctr.config.PostConfigureNetNS {
havePortMapping := len(ctr.Config().PortMappings) > 0
if havePortMapping {
diff --git a/libpod/options.go b/libpod/options.go
index dccbb8741..7eec530ea 100644
--- a/libpod/options.go
+++ b/libpod/options.go
@@ -18,6 +18,7 @@ import (
"github.com/containers/storage"
"github.com/containers/storage/pkg/idtools"
"github.com/cri-o/ocicni/pkg/ocicni"
+ "github.com/opencontainers/runtime-tools/generate"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -897,6 +898,17 @@ func WithUserNSFrom(nsCtr *Container) CtrCreateOption {
ctr.config.UserNsCtr = nsCtr.ID()
ctr.config.IDMappings = nsCtr.config.IDMappings
+ g := generate.NewFromSpec(ctr.config.Spec)
+
+ g.ClearLinuxUIDMappings()
+ for _, uidmap := range nsCtr.config.IDMappings.UIDMap {
+ g.AddLinuxUIDMapping(uint32(uidmap.HostID), uint32(uidmap.ContainerID), uint32(uidmap.Size))
+ }
+ g.ClearLinuxGIDMappings()
+ for _, gidmap := range nsCtr.config.IDMappings.GIDMap {
+ g.AddLinuxGIDMapping(uint32(gidmap.HostID), uint32(gidmap.ContainerID), uint32(gidmap.Size))
+ }
return nil
}
}
diff --git a/libpod/rootless_cni_linux.go b/libpod/rootless_cni_linux.go
new file mode 100644
index 000000000..76dbfdcae
--- /dev/null
+++ b/libpod/rootless_cni_linux.go
@@ -0,0 +1,320 @@
+// +build linux
+
+package libpod
+
+import (
+ "bytes"
+ "context"
+ "io"
+ "path/filepath"
+ "runtime"
+
+ cnitypes "github.com/containernetworking/cni/pkg/types/current"
+ "github.com/containernetworking/plugins/pkg/ns"
+ "github.com/containers/podman/v2/libpod/define"
+ "github.com/containers/podman/v2/libpod/image"
+ "github.com/containers/podman/v2/pkg/util"
+ "github.com/containers/storage/pkg/lockfile"
+ "github.com/hashicorp/go-multierror"
+ spec "github.com/opencontainers/runtime-spec/specs-go"
+ "github.com/opencontainers/runtime-tools/generate"
+ "github.com/pkg/errors"
+ "github.com/sirupsen/logrus"
+)
+
+var rootlessCNIInfraImage = map[string]string{
+ // Built from ../contrib/rootless-cni-infra
+ // TODO: move to Podman's official quay
+ "amd64": "ghcr.io/akihirosuda/podman-rootless-cni-infra:gd34868a13-amd64",
+}
+
+const (
+ rootlessCNIInfraContainerNamespace = "podman-system"
+ rootlessCNIInfraContainerName = "rootless-cni-infra"
+)
+
+// AllocRootlessCNI allocates a CNI netns inside the rootless CNI infra container.
+// Locks "rootless-cni-infra.lck".
+//
+// When the infra container is not running, it is created.
+//
+// AllocRootlessCNI does not lock c. c should be already locked.
+func AllocRootlessCNI(ctx context.Context, c *Container) (ns.NetNS, []*cnitypes.Result, error) {
+ if len(c.config.Networks) == 0 {
+ return nil, nil, errors.New("allocRootlessCNI shall not be called when len(c.config.Networks) == 0")
+ }
+ l, err := getRootlessCNIInfraLock(c.runtime)
+ if err != nil {
+ return nil, nil, err
+ }
+ l.Lock()
+ defer l.Unlock()
+ infra, err := ensureRootlessCNIInfraContainerRunning(ctx, c.runtime)
+ if err != nil {
+ return nil, nil, err
+ }
+ k8sPodName := getPodOrContainerName(c) // passed to CNI as K8S_POD_NAME
+ cniResults := make([]*cnitypes.Result, len(c.config.Networks))
+ for i, nw := range c.config.Networks {
+ cniRes, err := rootlessCNIInfraCallAlloc(infra, c.ID(), nw, k8sPodName)
+ if err != nil {
+ return nil, nil, err
+ }
+ cniResults[i] = cniRes
+ }
+ nsObj, err := rootlessCNIInfraGetNS(infra, c.ID())
+ if err != nil {
+ return nil, nil, err
+ }
+ logrus.Debugf("rootless CNI: container %q will join %q", c.ID(), nsObj.Path())
+ return nsObj, cniResults, nil
+}
+
+// DeallocRootlessCNI deallocates a CNI netns inside the rootless CNI infra container.
+// Locks "rootless-cni-infra.lck".
+//
+// When the infra container is no longer needed, it is removed.
+//
+// DeallocRootlessCNI does not lock c. c should be already locked.
+func DeallocRootlessCNI(ctx context.Context, c *Container) error {
+ if len(c.config.Networks) == 0 {
+ return errors.New("deallocRootlessCNI shall not be called when len(c.config.Networks) == 0")
+ }
+ l, err := getRootlessCNIInfraLock(c.runtime)
+ if err != nil {
+ return err
+ }
+ l.Lock()
+ defer l.Unlock()
+ infra, _ := getRootlessCNIInfraContainer(c.runtime)
+ if infra == nil {
+ return nil
+ }
+ var errs *multierror.Error
+ for _, nw := range c.config.Networks {
+ err := rootlessCNIInfraCallDealloc(infra, c.ID(), nw)
+ if err != nil {
+ errs = multierror.Append(errs, err)
+ }
+ }
+ if isIdle, err := rootlessCNIInfraIsIdle(infra); isIdle || err != nil {
+ if err != nil {
+ logrus.Warn(err)
+ }
+ logrus.Debugf("rootless CNI: removing infra container %q", infra.ID())
+ if err := c.runtime.removeContainer(ctx, infra, true, false, true); err != nil {
+ return err
+ }
+ logrus.Debugf("rootless CNI: removed infra container %q", infra.ID())
+ }
+ return errs.ErrorOrNil()
+}
+
+func getRootlessCNIInfraLock(r *Runtime) (lockfile.Locker, error) {
+ fname := filepath.Join(r.config.Engine.TmpDir, "rootless-cni-infra.lck")
+ return lockfile.GetLockfile(fname)
+}
+
+func getPodOrContainerName(c *Container) string {
+ pod, err := c.runtime.GetPod(c.PodID())
+ if err != nil || pod.config.Name == "" {
+ return c.Name()
+ }
+ return pod.config.Name
+}
+
+func rootlessCNIInfraCallAlloc(infra *Container, id, nw, k8sPodName string) (*cnitypes.Result, error) {
+ logrus.Debugf("rootless CNI: alloc %q, %q, %q", id, nw, k8sPodName)
+ var err error
+
+ _, err = rootlessCNIInfraExec(infra, "alloc", id, nw, k8sPodName)
+ if err != nil {
+ return nil, err
+ }
+ cniResStr, err := rootlessCNIInfraExec(infra, "print-cni-result", id, nw)
+ if err != nil {
+ return nil, err
+ }
+ var cniRes cnitypes.Result
+ if err := json.Unmarshal([]byte(cniResStr), &cniRes); err != nil {
+ return nil, errors.Wrapf(err, "unmarshaling as cnitypes.Result: %q", cniResStr)
+ }
+ return &cniRes, nil
+}
+
+func rootlessCNIInfraCallDealloc(infra *Container, id, nw string) error {
+ logrus.Debugf("rootless CNI: dealloc %q, %q", id, nw)
+ _, err := rootlessCNIInfraExec(infra, "dealloc", id, nw)
+ return err
+}
+
+func rootlessCNIInfraIsIdle(infra *Container) (bool, error) {
+ type isIdle struct {
+ Idle bool `json:"idle"`
+ }
+ resStr, err := rootlessCNIInfraExec(infra, "is-idle")
+ if err != nil {
+ return false, err
+ }
+ var res isIdle
+ if err := json.Unmarshal([]byte(resStr), &res); err != nil {
+ return false, errors.Wrapf(err, "unmarshaling as isIdle: %q", resStr)
+ }
+ return res.Idle, nil
+}
+
+func rootlessCNIInfraGetNS(infra *Container, id string) (ns.NetNS, error) {
+ type printNetnsPath struct {
+ Path string `json:"path"`
+ }
+ resStr, err := rootlessCNIInfraExec(infra, "print-netns-path", id)
+ if err != nil {
+ return nil, err
+ }
+ var res printNetnsPath
+ if err := json.Unmarshal([]byte(resStr), &res); err != nil {
+ return nil, errors.Wrapf(err, "unmarshaling as printNetnsPath: %q", resStr)
+ }
+ nsObj, err := ns.GetNS(res.Path)
+ if err != nil {
+ return nil, err
+ }
+ return nsObj, nil
+}
+
+func getRootlessCNIInfraContainer(r *Runtime) (*Container, error) {
+ containers, err := r.GetContainersWithoutLock(func(c *Container) bool {
+ return c.Namespace() == rootlessCNIInfraContainerNamespace &&
+ c.Name() == rootlessCNIInfraContainerName
+ })
+ if err != nil {
+ return nil, err
+ }
+ if len(containers) == 0 {
+ return nil, nil
+ }
+ return containers[0], nil
+}
+
+func ensureRootlessCNIInfraContainerRunning(ctx context.Context, r *Runtime) (*Container, error) {
+ c, err := getRootlessCNIInfraContainer(r)
+ if err != nil {
+ return nil, err
+ }
+ if c == nil {
+ return startRootlessCNIInfraContainer(ctx, r)
+ }
+ st, err := c.ContainerState()
+ if err != nil {
+ return nil, err
+ }
+ if st.State == define.ContainerStateRunning {
+ logrus.Debugf("rootless CNI: infra container %q is already running", c.ID())
+ return c, nil
+ }
+ logrus.Debugf("rootless CNI: infra container %q is %q, being started", c.ID(), st.State)
+ if err := c.initAndStart(ctx); err != nil {
+ return nil, err
+ }
+ logrus.Debugf("rootless CNI: infra container %q is running", c.ID())
+ return c, nil
+}
+
+func startRootlessCNIInfraContainer(ctx context.Context, r *Runtime) (*Container, error) {
+ imageName, ok := rootlessCNIInfraImage[runtime.GOARCH]
+ if !ok {
+ return nil, errors.Errorf("cannot find rootless-podman-network-sandbox image for %s", runtime.GOARCH)
+ }
+ logrus.Debugf("rootless CNI: ensuring image %q to exist", imageName)
+ newImage, err := r.ImageRuntime().New(ctx, imageName, "", "", nil, nil,
+ image.SigningOptions{}, nil, util.PullImageMissing)
+ if err != nil {
+ return nil, err
+ }
+ logrus.Debugf("rootless CNI: image %q is ready", imageName)
+
+ g, err := generate.New("linux")
+ if err != nil {
+ return nil, err
+ }
+ g.SetupPrivileged(true)
+ // Set --pid=host for ease of propagating "/proc/PID/ns/net" string
+ if err := g.RemoveLinuxNamespace(string(spec.PIDNamespace)); err != nil {
+ return nil, err
+ }
+ g.RemoveMount("/proc")
+ procMount := spec.Mount{
+ Destination: "/proc",
+ Type: "bind",
+ Source: "/proc",
+ Options: []string{"rbind", "nosuid", "noexec", "nodev"},
+ }
+ g.AddMount(procMount)
+ // Mount CNI networks
+ etcCNINetD := spec.Mount{
+ Destination: "/etc/cni/net.d",
+ Type: "bind",
+ Source: r.config.Network.NetworkConfigDir,
+ Options: []string{"ro"},
+ }
+ g.AddMount(etcCNINetD)
+ // FIXME: how to propagate ProcessArgs and Envs from Dockerfile?
+ g.SetProcessArgs([]string{"sleep", "infinity"})
+ g.AddProcessEnv("CNI_PATH", "/opt/cni/bin")
+ var options []CtrCreateOption
+ options = append(options, WithRootFSFromImage(newImage.ID(), imageName, imageName))
+ options = append(options, WithCtrNamespace(rootlessCNIInfraContainerNamespace))
+ options = append(options, WithName(rootlessCNIInfraContainerName))
+ options = append(options, WithPrivileged(true))
+ options = append(options, WithSecLabels([]string{"disable"}))
+ options = append(options, WithRestartPolicy("always"))
+ options = append(options, WithNetNS(nil, false, "slirp4netns", nil))
+ c, err := r.NewContainer(ctx, g.Config, options...)
+ if err != nil {
+ return nil, err
+ }
+ logrus.Debugf("rootless CNI infra container %q is created, now being started", c.ID())
+ if err := c.initAndStart(ctx); err != nil {
+ return nil, err
+ }
+ logrus.Debugf("rootless CNI: infra container %q is running", c.ID())
+
+ return c, nil
+}
+
+func rootlessCNIInfraExec(c *Container, args ...string) (string, error) {
+ cmd := "rootless-cni-infra"
+ var (
+ outB bytes.Buffer
+ errB bytes.Buffer
+ streams define.AttachStreams
+ config ExecConfig
+ )
+ streams.OutputStream = &nopWriteCloser{Writer: &outB}
+ streams.ErrorStream = &nopWriteCloser{Writer: &errB}
+ streams.AttachOutput = true
+ streams.AttachError = true
+ config.Command = append([]string{cmd}, args...)
+ config.Privileged = true
+ logrus.Debugf("rootlessCNIInfraExec: c.ID()=%s, config=%+v, streams=%v, begin",
+ c.ID(), config, streams)
+ code, err := c.Exec(&config, &streams, nil)
+ logrus.Debugf("rootlessCNIInfraExec: c.ID()=%s, config=%+v, streams=%v, end (code=%d, err=%v)",
+ c.ID(), config, streams, code, err)
+ if err != nil {
+ return "", err
+ }
+ if code != 0 {
+ return "", errors.Errorf("command %s %v in container %s failed with status %d, stdout=%q, stderr=%q",
+ cmd, args, c.ID(), code, outB.String(), errB.String())
+ }
+ return outB.String(), nil
+}
+
+type nopWriteCloser struct {
+ io.Writer
+}
+
+func (nwc *nopWriteCloser) Close() error {
+ return nil
+}
diff --git a/libpod/runtime_ctr.go b/libpod/runtime_ctr.go
index fa91fe002..241448981 100644
--- a/libpod/runtime_ctr.go
+++ b/libpod/runtime_ctr.go
@@ -8,11 +8,13 @@ import (
"strings"
"time"
+ "github.com/containers/buildah"
"github.com/containers/common/pkg/config"
"github.com/containers/podman/v2/libpod/define"
"github.com/containers/podman/v2/libpod/events"
"github.com/containers/podman/v2/pkg/cgroups"
"github.com/containers/podman/v2/pkg/rootless"
+ "github.com/containers/storage"
"github.com/containers/storage/pkg/stringid"
"github.com/docker/go-units"
spec "github.com/opencontainers/runtime-spec/specs-go"
@@ -770,7 +772,11 @@ func (r *Runtime) LookupContainer(idOrName string) (*Container, error) {
func (r *Runtime) GetContainers(filters ...ContainerFilter) ([]*Container, error) {
r.lock.RLock()
defer r.lock.RUnlock()
+ return r.GetContainersWithoutLock(filters...)
+}
+
+// GetContainersWithoutLock is the same as GetContainers but does not take the
+// runtime lock.
+func (r *Runtime) GetContainersWithoutLock(filters ...ContainerFilter) ([]*Container, error) {
if !r.valid {
return nil, define.ErrRuntimeStopped
}
@@ -905,3 +911,34 @@ func (r *Runtime) PruneContainers(filterFuncs []ContainerFilter) (map[string]int
}
return prunedContainers, pruneErrors, nil
}
+
+// StorageContainers returns a list of containers from containers/storage that
+// are not currently known to Podman.
+func (r *Runtime) StorageContainers() ([]storage.Container, error) {
+ if r.store == nil {
+ return nil, define.ErrStoreNotInitialized
+ }
+
+ storeContainers, err := r.store.Containers()
+ if err != nil {
+ return nil, errors.Wrapf(err, "error reading list of all storage containers")
+ }
+ retCtrs := []storage.Container{}
+ for _, container := range storeContainers {
+ exists, err := r.state.HasContainer(container.ID)
+ if err != nil && err != define.ErrNoSuchCtr {
+ return nil, errors.Wrapf(err, "failed to check if %s container exists in database", container.ID)
+ }
+ if exists {
+ continue
+ }
+ retCtrs = append(retCtrs, container)
+ }
+
+ return retCtrs, nil
+}
+
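+// IsBuildahContainer returns whether the container with the given ID was
+// created by Buildah rather than Podman.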
+func (r *Runtime) IsBuildahContainer(id string) (bool, error) {
+ return buildah.IsContainer(id, r.store)
+}
diff --git a/libpod/runtime_img.go b/libpod/runtime_img.go
index 2bc9feb65..eb4512f8d 100644
--- a/libpod/runtime_img.go
+++ b/libpod/runtime_img.go
@@ -282,9 +282,16 @@ func (r *Runtime) LoadImage(ctx context.Context, name, inputFile string, writer
src types.ImageReference
)
+ if name == "" {
+ newImages, err = r.ImageRuntime().LoadAllImagesFromDockerArchive(ctx, inputFile, signaturePolicy, writer)
+ if err == nil {
+ return getImageNames(newImages), nil
+ }
+ }
+
for _, referenceFn := range []func() (types.ImageReference, error){
func() (types.ImageReference, error) {
- return dockerarchive.ParseReference(inputFile) // FIXME? We should add dockerarchive.NewReference()
+ return dockerarchive.ParseReference(inputFile)
},
func() (types.ImageReference, error) {
return ociarchive.NewReference(inputFile, name) // name may be ""
diff --git a/pkg/api/handlers/compat/containers.go b/pkg/api/handlers/compat/containers.go
index 1ae6a990b..b1ef08cda 100644
--- a/pkg/api/handlers/compat/containers.go
+++ b/pkg/api/handlers/compat/containers.go
@@ -5,6 +5,7 @@ import (
"fmt"
"net/http"
"strings"
+ "syscall"
"github.com/containers/podman/v2/libpod"
"github.com/containers/podman/v2/libpod/define"
@@ -169,16 +170,16 @@ func KillContainer(w http.ResponseWriter, r *http.Request) {
return
}
- err = con.Kill(uint(sig))
+ signal := uint(sig)
+
+ err = con.Kill(signal)
if err != nil {
utils.Error(w, "Something went wrong.", http.StatusInternalServerError, errors.Wrapf(err, "unable to kill Container %s", name))
}
- if utils.IsLibpodRequest(r) {
- // the kill behavior for docker differs from podman in that they appear to wait
- // for the Container to croak so the exit code is accurate immediately after the
- // kill is sent. libpod does not. but we can add a wait here only for the docker
- // side of things and mimic that behavior
+ // Docker waits for the container to stop if the signal is 0 or
+ // SIGKILL.
+ if !utils.IsLibpodRequest(r) && (signal == 0 || syscall.Signal(signal) == syscall.SIGKILL) {
if _, err = con.Wait(); err != nil {
utils.Error(w, "Something went wrong.", http.StatusInternalServerError, errors.Wrapf(err, "failed to wait for Container %s", con.ID()))
return
diff --git a/pkg/api/handlers/compat/images.go b/pkg/api/handlers/compat/images.go
index 6872dd780..8765e20ca 100644
--- a/pkg/api/handlers/compat/images.go
+++ b/pkg/api/handlers/compat/images.go
@@ -365,7 +365,6 @@ func LoadImages(w http.ResponseWriter, r *http.Request) {
return
}
id, err := runtime.LoadImage(r.Context(), "", f.Name(), writer, "")
- //id, err := runtime.Import(r.Context())
if err != nil {
utils.Error(w, "Something went wrong.", http.StatusInternalServerError, errors.Wrap(err, "failed to load image"))
return
diff --git a/pkg/api/handlers/compat/networks.go b/pkg/api/handlers/compat/networks.go
index 80b7505df..87b947549 100644
--- a/pkg/api/handlers/compat/networks.go
+++ b/pkg/api/handlers/compat/networks.go
@@ -5,6 +5,7 @@ import (
"net"
"net/http"
"os"
+ "strings"
"syscall"
"time"
@@ -177,9 +178,11 @@ func ListNetworks(w http.ResponseWriter, r *http.Request) {
utils.InternalServerError(w, err)
return
}
+
+ filterNames, nameFilterExists := query.Filters["name"]
// TODO remove when filters are implemented
- if len(query.Filters) > 0 {
- utils.InternalServerError(w, errors.New("filters for listing networks is not implemented"))
+ if (!nameFilterExists && len(query.Filters) > 0) || len(query.Filters) > 1 {
+ utils.InternalServerError(w, errors.New("only the name filter for listing networks is implemented"))
return
}
netNames, err := network.GetNetworkNamesFromFileSystem(config)
@@ -187,6 +190,21 @@ func ListNetworks(w http.ResponseWriter, r *http.Request) {
utils.InternalServerError(w, err)
return
}
+
+ // filter by name
+ if nameFilterExists {
+ names := []string{}
+ for _, name := range netNames {
+ for _, filter := range filterNames {
+ if strings.Contains(name, filter) {
+ names = append(names, name)
+ break
+ }
+ }
+ }
+ netNames = names
+ }
+
reports := make([]*types.NetworkResource, 0, len(netNames))
for _, name := range netNames {
report, err := getNetworkResourceByName(name, runtime)
diff --git a/pkg/api/handlers/libpod/generate.go b/pkg/api/handlers/libpod/generate.go
index 966874a2b..33bb75391 100644
--- a/pkg/api/handlers/libpod/generate.go
+++ b/pkg/api/handlers/libpod/generate.go
@@ -7,10 +7,55 @@ import (
"github.com/containers/podman/v2/pkg/api/handlers/utils"
"github.com/containers/podman/v2/pkg/domain/entities"
"github.com/containers/podman/v2/pkg/domain/infra/abi"
+ "github.com/containers/podman/v2/pkg/util"
"github.com/gorilla/schema"
"github.com/pkg/errors"
)
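+// GenerateSystemd generates systemd units for the container or pod named in
+// the request and writes them back as a map of unit name to unit content.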
+func GenerateSystemd(w http.ResponseWriter, r *http.Request) {
+ runtime := r.Context().Value("runtime").(*libpod.Runtime)
+ decoder := r.Context().Value("decoder").(*schema.Decoder)
+ query := struct {
+ Name bool `schema:"useName"`
+ New bool `schema:"new"`
+ RestartPolicy string `schema:"restartPolicy"`
+ StopTimeout uint `schema:"stopTimeout"`
+ ContainerPrefix string `schema:"containerPrefix"`
+ PodPrefix string `schema:"podPrefix"`
+ Separator string `schema:"separator"`
+ }{
+ RestartPolicy: "on-failure",
+ StopTimeout: util.DefaultContainerConfig().Engine.StopTimeout,
+ ContainerPrefix: "container",
+ PodPrefix: "pod",
+ Separator: "-",
+ }
+
+ if err := decoder.Decode(&query, r.URL.Query()); err != nil {
+ utils.Error(w, http.StatusText(http.StatusBadRequest), http.StatusBadRequest,
+ errors.Wrapf(err, "failed to parse parameters for %s", r.URL.String()))
+ return
+ }
+
+ containerEngine := abi.ContainerEngine{Libpod: runtime}
+ options := entities.GenerateSystemdOptions{
+ Name: query.Name,
+ New: query.New,
+ RestartPolicy: query.RestartPolicy,
+ StopTimeout: &query.StopTimeout,
+ ContainerPrefix: query.ContainerPrefix,
+ PodPrefix: query.PodPrefix,
+ Separator: query.Separator,
+ }
+ report, err := containerEngine.GenerateSystemd(r.Context(), utils.GetName(r), options)
+ if err != nil {
+ utils.Error(w, "Something went wrong.", http.StatusInternalServerError, errors.Wrap(err, "error generating systemd units"))
+ return
+ }
+
+ utils.WriteResponse(w, http.StatusOK, report.Units)
+}
+
func GenerateKube(w http.ResponseWriter, r *http.Request) {
runtime := r.Context().Value("runtime").(*libpod.Runtime)
decoder := r.Context().Value("decoder").(*schema.Decoder)
diff --git a/pkg/api/handlers/libpod/images.go b/pkg/api/handlers/libpod/images.go
index 8d3fc4e00..85f7903dc 100644
--- a/pkg/api/handlers/libpod/images.go
+++ b/pkg/api/handlers/libpod/images.go
@@ -234,6 +234,76 @@ func ExportImage(w http.ResponseWriter, r *http.Request) {
utils.WriteResponse(w, http.StatusOK, rdr)
}
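+// ExportImages writes one or more images from local storage into a single
+// archive and streams it back to the client. Only the docker-archive
+// format is currently supported.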
+func ExportImages(w http.ResponseWriter, r *http.Request) {
+ var (
+ output string
+ )
+ runtime := r.Context().Value("runtime").(*libpod.Runtime)
+ decoder := r.Context().Value("decoder").(*schema.Decoder)
+ query := struct {
+ Compress bool `schema:"compress"`
+ Format string `schema:"format"`
+ References []string `schema:"references"`
+ }{
+ Format: define.OCIArchive,
+ }
+
+ if err := decoder.Decode(&query, r.URL.Query()); err != nil {
+ utils.Error(w, http.StatusText(http.StatusBadRequest), http.StatusBadRequest,
+ errors.Wrapf(err, "Failed to parse parameters for %s", r.URL.String()))
+ return
+ }
+
+ // References are mandatory!
+ if len(query.References) == 0 {
+ utils.Error(w, http.StatusText(http.StatusBadRequest), http.StatusBadRequest,
+ errors.New("No references"))
+ return
+ }
+
+ // Format is mandatory! Currently, we only support multi-image docker
+ // archives.
+ switch query.Format {
+ case define.V2s2Archive:
+ tmpfile, err := ioutil.TempFile("", "api.tar")
+ if err != nil {
+ utils.Error(w, "unable to create tmpfile", http.StatusInternalServerError, errors.Wrap(err, "unable to create tempfile"))
+ return
+ }
+ output = tmpfile.Name()
+ if err := tmpfile.Close(); err != nil {
+ utils.Error(w, "unable to close tmpfile", http.StatusInternalServerError, errors.Wrap(err, "unable to close tempfile"))
+ return
+ }
+ default:
+ utils.Error(w, "unsupported format", http.StatusInternalServerError, errors.Errorf("unsupported format %q", query.Format))
+ return
+ }
+ defer os.RemoveAll(output)
+
+ // Use the ABI image engine to share as much code as possible.
+ opts := entities.ImageSaveOptions{
+ Compress: query.Compress,
+ Format: query.Format,
+ MultiImageArchive: true,
+ Output: output,
+ }
+
+ imageEngine := abi.ImageEngine{Libpod: runtime}
+ if err := imageEngine.Save(r.Context(), query.References[0], query.References[1:], opts); err != nil {
+ utils.Error(w, http.StatusText(http.StatusBadRequest), http.StatusBadRequest, err)
+ return
+ }
+
+ rdr, err := os.Open(output)
+ if err != nil {
+ utils.Error(w, "Something went wrong.", http.StatusInternalServerError, errors.Wrap(err, "failed to read the exported tarfile"))
+ return
+ }
+ defer rdr.Close()
+ utils.WriteResponse(w, http.StatusOK, rdr)
+}
+
func ImagesLoad(w http.ResponseWriter, r *http.Request) {
runtime := r.Context().Value("runtime").(*libpod.Runtime)
decoder := r.Context().Value("decoder").(*schema.Decoder)
diff --git a/pkg/api/handlers/libpod/networks.go b/pkg/api/handlers/libpod/networks.go
index 475522664..dfece2a4e 100644
--- a/pkg/api/handlers/libpod/networks.go
+++ b/pkg/api/handlers/libpod/networks.go
@@ -42,7 +42,21 @@ func CreateNetwork(w http.ResponseWriter, r *http.Request) {
}
func ListNetworks(w http.ResponseWriter, r *http.Request) {
runtime := r.Context().Value("runtime").(*libpod.Runtime)
- options := entities.NetworkListOptions{}
+ decoder := r.Context().Value("decoder").(*schema.Decoder)
+ query := struct {
+ Filter string `schema:"filter"`
+ }{
+ // override any golang type defaults
+ }
+ if err := decoder.Decode(&query, r.URL.Query()); err != nil {
+ utils.Error(w, http.StatusText(http.StatusBadRequest), http.StatusBadRequest,
+ errors.Wrapf(err, "Failed to parse parameters for %s", r.URL.String()))
+ return
+ }
+
+ options := entities.NetworkListOptions{
+ Filter: query.Filter,
+ }
ic := abi.ContainerEngine{Libpod: runtime}
reports, err := ic.NetworkList(r.Context(), options)
if err != nil {
diff --git a/pkg/api/handlers/libpod/pods.go b/pkg/api/handlers/libpod/pods.go
index 8f8292567..82a7299b2 100644
--- a/pkg/api/handlers/libpod/pods.go
+++ b/pkg/api/handlers/libpod/pods.go
@@ -327,7 +327,7 @@ func PodTop(w http.ResponseWriter, r *http.Request) {
name := utils.GetName(r)
pod, err := runtime.LookupPod(name)
if err != nil {
- utils.ContainerNotFound(w, name, err)
+ utils.PodNotFound(w, name, err)
return
}
diff --git a/pkg/api/server/register_generate.go b/pkg/api/server/register_generate.go
index 7db8ee387..60e5b03f7 100644
--- a/pkg/api/server/register_generate.go
+++ b/pkg/api/server/register_generate.go
@@ -8,6 +8,68 @@ import (
)
func (s *APIServer) registerGenerateHandlers(r *mux.Router) error {
+ // swagger:operation GET /libpod/generate/{name:.*}/systemd libpod libpodGenerateSystemd
+ // ---
+ // tags:
+ // - containers
+ // - pods
+ // summary: Generate Systemd Units
+ // description: Generate Systemd Units based on a pod or container.
+ // parameters:
+ // - in: path
+ // name: name:.*
+ // type: string
+ // required: true
+ // description: Name or ID of the container or pod.
+ // - in: query
+ // name: useName
+ // type: boolean
+ // default: false
+ // description: Use container/pod names instead of IDs.
+ // - in: query
+ // name: new
+ // type: boolean
+ // default: false
+ // description: Create a new container instead of starting an existing one.
+ // - in: query
+ // name: stopTimeout
+ // type: integer
+ // default: 10
+ // description: Stop timeout override.
+ // - in: query
+ // name: restartPolicy
+ // default: on-failure
+ // type: string
+ // enum: ["no", on-success, on-failure, on-abnormal, on-watchdog, on-abort, always]
+ // description: Systemd restart-policy.
+ // - in: query
+ // name: containerPrefix
+ // type: string
+ // default: container
+ // description: Systemd unit name prefix for containers.
+ // - in: query
+ // name: podPrefix
+ // type: string
+ // default: pod
+ // description: Systemd unit name prefix for pods.
+ // - in: query
+ // name: separator
+ // type: string
+ // default: "-"
+ // description: Systemd unit name separator between name/id and prefix.
+ // produces:
+ // - application/json
+ // responses:
+ // 200:
+ // description: no error
+ // schema:
+ // type: object
+ // additionalProperties:
+ // type: string
+ // 500:
+ // $ref: "#/responses/InternalError"
+ r.HandleFunc(VersionedPath("/libpod/generate/{name:.*}/systemd"), s.APIHandler(libpod.GenerateSystemd)).Methods(http.MethodGet)
+
// swagger:operation GET /libpod/generate/{name:.*}/kube libpod libpodGenerateKube
// ---
// tags:
diff --git a/pkg/api/server/register_images.go b/pkg/api/server/register_images.go
index 64258a073..b1007fe09 100644
--- a/pkg/api/server/register_images.go
+++ b/pkg/api/server/register_images.go
@@ -1028,6 +1028,40 @@ func (s *APIServer) registerImagesHandlers(r *mux.Router) error {
// 500:
// $ref: '#/responses/InternalError'
r.Handle(VersionedPath("/libpod/images/{name:.*}/get"), s.APIHandler(libpod.ExportImage)).Methods(http.MethodGet)
+ // swagger:operation GET /libpod/images/export libpod libpodExportImages
+ // ---
+ // tags:
+ // - images
+ // summary: Export multiple images
+ // description: Export multiple images into a single object. Only `docker-archive` is currently supported.
+ // parameters:
+ // - in: query
+ // name: format
+ // type: string
+ // description: format for exported image (only docker-archive is supported)
+ // - in: query
+ // name: references
+ // description: references to images to export
+ // type: array
+ // items:
+ // type: string
+ // - in: query
+ // name: compress
+ // type: boolean
+ // description: use compression on image
+ // produces:
+ // - application/json
+ // responses:
+ // 200:
+ // description: no error
+ // schema:
+ // type: string
+ // format: binary
+ // 404:
+ // $ref: '#/responses/NoSuchImage'
+ // 500:
+ // $ref: '#/responses/InternalError'
+ r.Handle(VersionedPath("/libpod/images/export"), s.APIHandler(libpod.ExportImages)).Methods(http.MethodGet)
// swagger:operation GET /libpod/images/{name:.*}/json libpod libpodInspectImage
// ---
// tags:
diff --git a/pkg/api/server/register_networks.go b/pkg/api/server/register_networks.go
index 7918ad4a2..61916eedf 100644
--- a/pkg/api/server/register_networks.go
+++ b/pkg/api/server/register_networks.go
@@ -61,6 +61,11 @@ func (s *APIServer) registerNetworkHandlers(r *mux.Router) error {
// - networks (compat)
// summary: List networks
// description: Display summary of network configurations
+ // parameters:
+ // - in: query
+ // name: filters
+ // type: string
+ // description: JSON encoded value of the filters (a map[string][]string) to process on the networks list. Only the name filter is supported.
// produces:
// - application/json
// responses:
@@ -106,7 +111,7 @@ func (s *APIServer) registerNetworkHandlers(r *mux.Router) error {
// required: true
// description: the name of the network
// - in: query
- // name: Force
+ // name: force
// type: boolean
// description: remove containers associated with network
// produces:
@@ -152,6 +157,11 @@ func (s *APIServer) registerNetworkHandlers(r *mux.Router) error {
// - networks
// summary: List networks
// description: Display summary of network configurations
+ // parameters:
+ // - in: query
+ // name: filter
+ // type: string
+ // description: Provide filter values (e.g. 'name=podman')
// produces:
// - application/json
// responses:
diff --git a/pkg/bindings/generate/generate.go b/pkg/bindings/generate/generate.go
index b02221765..dde1cc29c 100644
--- a/pkg/bindings/generate/generate.go
+++ b/pkg/bindings/generate/generate.go
@@ -10,6 +10,33 @@ import (
"github.com/containers/podman/v2/pkg/domain/entities"
)
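+// Systemd generates systemd units for the container or pod with the given
+// name or ID, returning a map of unit name to unit content.
+//
+// A minimal usage sketch (illustrative only; the container name is
+// hypothetical):
+//
+//    report, err := generate.Systemd(ctx, "mycontainer", entities.GenerateSystemdOptions{RestartPolicy: "on-failure"})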
+func Systemd(ctx context.Context, nameOrID string, options entities.GenerateSystemdOptions) (*entities.GenerateSystemdReport, error) {
+ conn, err := bindings.GetClient(ctx)
+ if err != nil {
+ return nil, err
+ }
+ params := url.Values{}
+
+ params.Set("useName", strconv.FormatBool(options.Name))
+ params.Set("new", strconv.FormatBool(options.New))
+ if options.RestartPolicy != "" {
+ params.Set("restartPolicy", options.RestartPolicy)
+ }
+ if options.StopTimeout != nil {
+ params.Set("stopTimeout", strconv.FormatUint(uint64(*options.StopTimeout), 10))
+ }
+ params.Set("containerPrefix", options.ContainerPrefix)
+ params.Set("podPrefix", options.PodPrefix)
+ params.Set("separator", options.Separator)
+
+ response, err := conn.DoRequest(nil, http.MethodGet, "/generate/%s/systemd", params, nil, nameOrID)
+ if err != nil {
+ return nil, err
+ }
+ report := &entities.GenerateSystemdReport{}
+ return report, response.Process(&report.Units)
+}
+
func Kube(ctx context.Context, nameOrID string, options entities.GenerateKubeOptions) (*entities.GenerateKubeReport, error) {
conn, err := bindings.GetClient(ctx)
if err != nil {
diff --git a/pkg/bindings/images/images.go b/pkg/bindings/images/images.go
index 9f6e78b79..a80c94025 100644
--- a/pkg/bindings/images/images.go
+++ b/pkg/bindings/images/images.go
@@ -128,6 +128,34 @@ func Load(ctx context.Context, r io.Reader, name *string) (*entities.ImageLoadRe
return &report, response.Process(&report)
}
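+// MultiExport saves the referenced images from local storage into a single
+// archive written to w. Only the docker-archive format currently supports
+// more than one image per archive.
+//
+// A minimal usage sketch (illustrative only; the file name and image
+// references are hypothetical):
+//
+//    f, _ := os.Create("images.tar")
+//    defer f.Close()
+//    format := "docker-archive"
+//    err := images.MultiExport(ctx, []string{"alpine", "busybox"}, f, &format, nil)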
+func MultiExport(ctx context.Context, namesOrIds []string, w io.Writer, format *string, compress *bool) error {
+ conn, err := bindings.GetClient(ctx)
+ if err != nil {
+ return err
+ }
+ params := url.Values{}
+ if format != nil {
+ params.Set("format", *format)
+ }
+ if compress != nil {
+ params.Set("compress", strconv.FormatBool(*compress))
+ }
+ for _, ref := range namesOrIds {
+ params.Add("references", ref)
+ }
+ response, err := conn.DoRequest(nil, http.MethodGet, "/images/export", params, nil)
+ if err != nil {
+ return err
+ }
+
+ if response.StatusCode/100 == 2 || response.StatusCode/100 == 3 {
+ _, err = io.Copy(w, response.Body)
+ return err
+ }
+ return response.Process(nil)
+}
+
// Export saves an image from local storage as a tarball or image archive. The optional format
// parameter is used to change the format of the output.
func Export(ctx context.Context, nameOrID string, w io.Writer, format *string, compress *bool) error {
diff --git a/pkg/bindings/network/network.go b/pkg/bindings/network/network.go
index fd1111282..d8dc7e352 100644
--- a/pkg/bindings/network/network.go
+++ b/pkg/bindings/network/network.go
@@ -70,7 +70,7 @@ func Remove(ctx context.Context, nameOrID string, force *bool) ([]*entities.Netw
}
// List returns a summary of all CNI network configurations
-func List(ctx context.Context) ([]*entities.NetworkListReport, error) {
+func List(ctx context.Context, options entities.NetworkListOptions) ([]*entities.NetworkListReport, error) {
var (
netList []*entities.NetworkListReport
)
@@ -78,7 +78,11 @@ func List(ctx context.Context) ([]*entities.NetworkListReport, error) {
if err != nil {
return nil, err
}
- response, err := conn.DoRequest(nil, http.MethodGet, "/networks/json", nil, nil)
+ params := url.Values{}
+ if options.Filter != "" {
+ params.Set("filter", options.Filter)
+ }
+ response, err := conn.DoRequest(nil, http.MethodGet, "/networks/json", params, nil)
if err != nil {
return netList, err
}
diff --git a/pkg/domain/entities/containers.go b/pkg/domain/entities/containers.go
index c8894300b..16997cdd1 100644
--- a/pkg/domain/entities/containers.go
+++ b/pkg/domain/entities/containers.go
@@ -282,6 +282,7 @@ type ContainerListOptions struct {
Quiet bool
Size bool
Sort string
+ Storage bool
Sync bool
Watch uint
}
diff --git a/pkg/domain/entities/generate.go b/pkg/domain/entities/generate.go
index a8ad13705..4a0d7537e 100644
--- a/pkg/domain/entities/generate.go
+++ b/pkg/domain/entities/generate.go
@@ -4,8 +4,6 @@ import "io"
// GenerateSystemdOptions control the generation of systemd unit files.
type GenerateSystemdOptions struct {
- // Files - generate files instead of printing to stdout.
- Files bool
// Name - use container/pod name instead of its ID.
Name bool
// New - create a new container instead of starting an existing one.
@@ -24,9 +22,8 @@ type GenerateSystemdOptions struct {
// GenerateSystemdReport
type GenerateSystemdReport struct {
- // Output of the generate process. Either the generated files or their
- // entire content.
- Output string
+ // Units of the generate process, mapping each unit name to its content.
+ Units map[string]string
}
// GenerateKubeOptions control the generation of Kubernetes YAML files.
diff --git a/pkg/domain/entities/images.go b/pkg/domain/entities/images.go
index 3a12a4e22..2a8133680 100644
--- a/pkg/domain/entities/images.go
+++ b/pkg/domain/entities/images.go
@@ -271,11 +271,22 @@ type ImageImportReport struct {
Id string //nolint
}
+// ImageSaveOptions provide options for saving images.
type ImageSaveOptions struct {
+ // Compress layers when saving to a directory.
Compress bool
- Format string
- Output string
- Quiet bool
+ // Format to save the image in: oci-archive, oci-dir (directory with oci
+ // manifest type), docker-archive, docker-dir (directory with v2s2
+ // manifest type).
+ Format string
+ // MultiImageArchive denotes if the created archive shall include more
+ // than one image. Additional tags will be interpreted as references
+ // to images which are added to the archive.
+ MultiImageArchive bool
+ // Output - write image to the specified path.
+ Output string
+ // Quiet - suppress output when copying images
+ Quiet bool
}
// ImageTreeOptions provides options for ImageEngine.Tree()
diff --git a/pkg/domain/entities/manifest.go b/pkg/domain/entities/manifest.go
index 853619b19..01180951a 100644
--- a/pkg/domain/entities/manifest.go
+++ b/pkg/domain/entities/manifest.go
@@ -9,14 +9,19 @@ type ManifestCreateOptions struct {
}
type ManifestAddOptions struct {
- All bool `json:"all" schema:"all"`
- Annotation []string `json:"annotation" schema:"annotation"`
- Arch string `json:"arch" schema:"arch"`
- Features []string `json:"features" schema:"features"`
- Images []string `json:"images" schema:"images"`
- OS string `json:"os" schema:"os"`
- OSVersion string `json:"os_version" schema:"os_version"`
- Variant string `json:"variant" schema:"variant"`
+ All bool `json:"all" schema:"all"`
+ Annotation []string `json:"annotation" schema:"annotation"`
+ Arch string `json:"arch" schema:"arch"`
+ Authfile string `json:"-" schema:"-"`
+ CertDir string `json:"-" schema:"-"`
+ Features []string `json:"features" schema:"features"`
+ Images []string `json:"images" schema:"images"`
+ OS string `json:"os" schema:"os"`
+ OSVersion string `json:"os_version" schema:"os_version"`
+ Password string `json:"-" schema:"-"`
+ SkipTLSVerify types.OptionalBool `json:"-" schema:"-"`
+ Username string `json:"-" schema:"-"`
+ Variant string `json:"variant" schema:"variant"`
}
type ManifestAnnotateOptions struct {
diff --git a/pkg/domain/entities/system.go b/pkg/domain/entities/system.go
index af355b0af..bde2b6ef2 100644
--- a/pkg/domain/entities/system.go
+++ b/pkg/domain/entities/system.go
@@ -75,9 +75,10 @@ type SystemDfContainerReport struct {
// SystemDfVolumeReport describes a volume and its size
type SystemDfVolumeReport struct {
- VolumeName string
- Links int
- Size int64
+ VolumeName string
+ Links int
+ Size int64
+ ReclaimableSize int64
}
// SystemResetOptions describes the options for resetting your
diff --git a/pkg/domain/infra/abi/containers.go b/pkg/domain/infra/abi/containers.go
index 3fee5d394..21618f555 100644
--- a/pkg/domain/infra/abi/containers.go
+++ b/pkg/domain/infra/abi/containers.go
@@ -174,6 +174,12 @@ func (ic *ContainerEngine) ContainerStop(ctx context.Context, namesOrIds []strin
return err
}
}
+ if c.AutoRemove() {
+ // Issue #7384: if the container is configured for
+ // auto-removal, it might already have been removed at
+ // this point.
+ return nil
+ }
return c.Cleanup(ctx)
})
if err != nil {
@@ -792,6 +798,9 @@ func (ic *ContainerEngine) ContainerStart(ctx context.Context, namesOrIds []stri
}
func (ic *ContainerEngine) ContainerList(ctx context.Context, options entities.ContainerListOptions) ([]entities.ListContainer, error) {
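+ // --latest is just --last=1.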
+ if options.Latest {
+ options.Last = 1
+ }
return ps.GetContainerLists(ic.Libpod, options)
}
diff --git a/pkg/domain/infra/abi/containers_runlabel.go b/pkg/domain/infra/abi/containers_runlabel.go
index ab2316d47..30a5a55b8 100644
--- a/pkg/domain/infra/abi/containers_runlabel.go
+++ b/pkg/domain/infra/abi/containers_runlabel.go
@@ -7,12 +7,10 @@ import (
"path/filepath"
"strings"
- "github.com/containers/image/v5/types"
"github.com/containers/podman/v2/libpod/define"
"github.com/containers/podman/v2/libpod/image"
"github.com/containers/podman/v2/pkg/domain/entities"
envLib "github.com/containers/podman/v2/pkg/env"
- "github.com/containers/podman/v2/pkg/util"
"github.com/containers/podman/v2/utils"
"github.com/google/shlex"
"github.com/pkg/errors"
@@ -36,6 +34,11 @@ func (ic *ContainerEngine) ContainerRunlabel(ctx context.Context, label string,
return err
}
+ if options.Display {
+ fmt.Printf("command: %s\n", strings.Join(append([]string{os.Args[0]}, cmd[1:]...), " "))
+ return nil
+ }
+
stdErr := os.Stderr
stdOut := os.Stdout
stdIn := os.Stdin
@@ -84,29 +87,17 @@ func (ic *ContainerEngine) runlabelImage(ctx context.Context, label string, imag
// Fallthrough and pull!
}
- // Parse credentials if specified.
- var credentials *types.DockerAuthConfig
- if options.Credentials != "" {
- credentials, err = util.ParseRegistryCreds(options.Credentials)
- if err != nil {
- return nil, err
- }
- }
-
- // Suppress pull progress bars if requested.
- pullOutput := os.Stdout
- if options.Quiet {
- pullOutput = nil // c/image/copy takes care of the rest
+ pullOptions := entities.ImagePullOptions{
+ Quiet: options.Quiet,
+ CertDir: options.CertDir,
+ SkipTLSVerify: options.SkipTLSVerify,
+ SignaturePolicy: options.SignaturePolicy,
+ Authfile: options.Authfile,
}
-
- // Pull the image.
- dockerRegistryOptions := image.DockerRegistryOptions{
- DockerCertPath: options.CertDir,
- DockerInsecureSkipTLSVerify: options.SkipTLSVerify,
- DockerRegistryCreds: credentials,
+ if _, err := pull(ctx, ic.Libpod.ImageRuntime(), imageRef, pullOptions, &label); err != nil {
+ return nil, err
}
-
- return ic.Libpod.ImageRuntime().New(ctx, imageRef, options.SignaturePolicy, options.Authfile, pullOutput, &dockerRegistryOptions, image.SigningOptions{}, &label, util.PullImageMissing)
+ return ic.Libpod.ImageRuntime().NewFromLocal(imageRef)
}
// generateRunlabelCommand generates the to-be-executed command as a string
diff --git a/pkg/domain/infra/abi/generate.go b/pkg/domain/infra/abi/generate.go
index 0b73ddd7e..79bf2291e 100644
--- a/pkg/domain/infra/abi/generate.go
+++ b/pkg/domain/infra/abi/generate.go
@@ -19,11 +19,11 @@ func (ic *ContainerEngine) GenerateSystemd(ctx context.Context, nameOrID string,
ctr, ctrErr := ic.Libpod.LookupContainer(nameOrID)
if ctrErr == nil {
// Generate the unit for the container.
- s, err := generate.ContainerUnit(ctr, options)
+ name, content, err := generate.ContainerUnit(ctr, options)
if err != nil {
return nil, err
}
- return &entities.GenerateSystemdReport{Output: s}, nil
+ return &entities.GenerateSystemdReport{Units: map[string]string{name: content}}, nil
}
// If it's not a container, we either have a pod or garbage.
@@ -34,11 +34,11 @@ func (ic *ContainerEngine) GenerateSystemd(ctx context.Context, nameOrID string,
}
// Generate the units for the pod and all its containers.
- s, err := generate.PodUnits(pod, options)
+ units, err := generate.PodUnits(pod, options)
if err != nil {
return nil, err
}
- return &entities.GenerateSystemdReport{Output: s}, nil
+ return &entities.GenerateSystemdReport{Units: units}, nil
}
func (ic *ContainerEngine) GenerateKube(ctx context.Context, nameOrID string, options entities.GenerateKubeOptions) (*entities.GenerateKubeReport, error) {
diff --git a/pkg/domain/infra/abi/images.go b/pkg/domain/infra/abi/images.go
index 6b94ca9c0..23aef9573 100644
--- a/pkg/domain/infra/abi/images.go
+++ b/pkg/domain/infra/abi/images.go
@@ -14,7 +14,6 @@ import (
"github.com/containers/common/pkg/config"
"github.com/containers/image/v5/docker"
- dockerarchive "github.com/containers/image/v5/docker/archive"
"github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/manifest"
"github.com/containers/image/v5/signature"
@@ -215,7 +214,7 @@ func ToDomainHistoryLayer(layer *libpodImage.History) entities.ImageHistoryLayer
return l
}
-func (ir *ImageEngine) Pull(ctx context.Context, rawImage string, options entities.ImagePullOptions) (*entities.ImagePullReport, error) {
+func pull(ctx context.Context, runtime *image.Runtime, rawImage string, options entities.ImagePullOptions, label *string) (*entities.ImagePullReport, error) {
var writer io.Writer
if !options.Quiet {
writer = os.Stderr
@@ -230,15 +229,6 @@ func (ir *ImageEngine) Pull(ctx context.Context, rawImage string, options entiti
}
}
- // Special-case for docker-archive which allows multiple tags.
- if imageRef.Transport().Name() == dockerarchive.Transport.Name() {
- newImage, err := ir.Libpod.ImageRuntime().LoadFromArchiveReference(ctx, imageRef, options.SignaturePolicy, writer)
- if err != nil {
- return nil, err
- }
- return &entities.ImagePullReport{Images: []string{newImage[0].ID()}}, nil
- }
-
var registryCreds *types.DockerAuthConfig
if len(options.Username) > 0 && len(options.Password) > 0 {
registryCreds = &types.DockerAuthConfig{
@@ -256,7 +246,7 @@ func (ir *ImageEngine) Pull(ctx context.Context, rawImage string, options entiti
}
if !options.AllTags {
- newImage, err := ir.Libpod.ImageRuntime().New(ctx, rawImage, options.SignaturePolicy, options.Authfile, writer, &dockerRegistryOptions, image.SigningOptions{}, nil, util.PullImageAlways)
+ newImage, err := runtime.New(ctx, rawImage, options.SignaturePolicy, options.Authfile, writer, &dockerRegistryOptions, image.SigningOptions{}, label, util.PullImageAlways)
if err != nil {
return nil, err
}
@@ -290,7 +280,7 @@ func (ir *ImageEngine) Pull(ctx context.Context, rawImage string, options entiti
foundIDs := []string{}
for _, tag := range tags {
name := rawImage + ":" + tag
- newImage, err := ir.Libpod.ImageRuntime().New(ctx, name, options.SignaturePolicy, options.Authfile, writer, &dockerRegistryOptions, image.SigningOptions{}, nil, util.PullImageAlways)
+ newImage, err := runtime.New(ctx, name, options.SignaturePolicy, options.Authfile, writer, &dockerRegistryOptions, image.SigningOptions{}, nil, util.PullImageAlways)
if err != nil {
logrus.Errorf("error pulling image %q", name)
continue
@@ -304,6 +294,10 @@ func (ir *ImageEngine) Pull(ctx context.Context, rawImage string, options entiti
return &entities.ImagePullReport{Images: foundIDs}, nil
}
+func (ir *ImageEngine) Pull(ctx context.Context, rawImage string, options entities.ImagePullOptions) (*entities.ImagePullReport, error) {
+ return pull(ctx, ir.Libpod.ImageRuntime(), rawImage, options, nil)
+}
+
func (ir *ImageEngine) Inspect(ctx context.Context, namesOrIDs []string, opts entities.InspectOptions) ([]*entities.ImageInspectReport, []error, error) {
reports := []*entities.ImageInspectReport{}
errs := []error{}
@@ -481,6 +475,10 @@ func (ir *ImageEngine) Import(ctx context.Context, opts entities.ImageImportOpti
}
func (ir *ImageEngine) Save(ctx context.Context, nameOrID string, tags []string, options entities.ImageSaveOptions) error {
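+ // Multi-image archives are written in one shot by the image runtime.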
+ if options.MultiImageArchive {
+ nameOrIDs := append([]string{nameOrID}, tags...)
+ return ir.Libpod.ImageRuntime().SaveImages(ctx, nameOrIDs, options.Format, options.Output, options.Quiet)
+ }
newImage, err := ir.Libpod.ImageRuntime().NewFromLocal(nameOrID)
if err != nil {
return err
diff --git a/pkg/domain/infra/abi/manifest.go b/pkg/domain/infra/abi/manifest.go
index 6f3c6b902..55f73bf65 100644
--- a/pkg/domain/infra/abi/manifest.go
+++ b/pkg/domain/infra/abi/manifest.go
@@ -102,7 +102,24 @@ func (ir *ImageEngine) ManifestAdd(ctx context.Context, opts entities.ManifestAd
}
manifestAddOpts.Annotation = annotations
}
- listID, err := listImage.AddManifest(*ir.Libpod.SystemContext(), manifestAddOpts)
+
+ // Set the system context: start from the runtime's context, or an empty one if unset.
+ sys := ir.Libpod.SystemContext()
+ if sys == nil {
+ sys = &types.SystemContext{}
+ }
+ sys.AuthFilePath = opts.Authfile
+ sys.DockerInsecureSkipTLSVerify = opts.SkipTLSVerify
+ sys.DockerCertPath = opts.CertDir
+
+ if opts.Username != "" && opts.Password != "" {
+ sys.DockerAuthConfig = &types.DockerAuthConfig{
+ Username: opts.Username,
+ Password: opts.Password,
+ }
+ }
+
+ listID, err := listImage.AddManifest(*sys, manifestAddOpts)
if err != nil {
return listID, err
}
@@ -191,6 +208,7 @@ func (ir *ImageEngine) ManifestPush(ctx context.Context, names []string, opts en
}
sys.AuthFilePath = opts.Authfile
sys.DockerInsecureSkipTLSVerify = opts.SkipTLSVerify
+ sys.DockerCertPath = opts.CertDir
if opts.Username != "" && opts.Password != "" {
sys.DockerAuthConfig = &types.DockerAuthConfig{
diff --git a/pkg/domain/infra/abi/network.go b/pkg/domain/infra/abi/network.go
index c06714cbb..807e4b272 100644
--- a/pkg/domain/infra/abi/network.go
+++ b/pkg/domain/infra/abi/network.go
@@ -5,6 +5,7 @@ import (
"encoding/json"
"fmt"
"io/ioutil"
+ "os"
"path/filepath"
"strings"
@@ -216,6 +217,9 @@ func createBridge(r *libpod.Runtime, name string, options entities.NetworkCreate
if err != nil {
return "", err
}
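+ // Make sure the CNI config directory exists before writing the conflist.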
+ if err := os.MkdirAll(network.GetCNIConfDir(runtimeConfig), 0755); err != nil {
+ return "", err
+ }
cniPathName := filepath.Join(network.GetCNIConfDir(runtimeConfig), fmt.Sprintf("%s.conflist", name))
err = ioutil.WriteFile(cniPathName, b, 0644)
return cniPathName, err
diff --git a/pkg/domain/infra/abi/play.go b/pkg/domain/infra/abi/play.go
index 31ad51672..6dfb52c63 100644
--- a/pkg/domain/infra/abi/play.go
+++ b/pkg/domain/infra/abi/play.go
@@ -132,7 +132,11 @@ func (ic *ContainerEngine) playKubePod(ctx context.Context, podName string, podY
libpod.WithInfraContainer(),
libpod.WithPodName(podName),
}
- // TODO for now we just used the default kernel namespaces; we need to add/subtract this from yaml
+ // TODO we only configure Process namespace. We also need to account for Host{IPC,Network,PID}
+ // which is not currently possible with pod create
+ if podYAML.Spec.ShareProcessNamespace != nil && *podYAML.Spec.ShareProcessNamespace {
+ podOptions = append(podOptions, libpod.WithPodPID())
+ }
hostname := podYAML.Spec.Hostname
if hostname == "" {
@@ -556,6 +560,7 @@ func kubeContainerToCreateConfig(ctx context.Context, containerYAML v1.Container
containerConfig.Env = envs
for _, volume := range containerYAML.VolumeMounts {
+ var readonly string
hostPath, exists := volumes[volume.Name]
if !exists {
return nil, errors.Errorf("Volume mount %s specified for container but not configured in volumes", volume.Name)
@@ -563,7 +568,10 @@ func kubeContainerToCreateConfig(ctx context.Context, containerYAML v1.Container
if err := parse.ValidateVolumeCtrDir(volume.MountPath); err != nil {
return nil, errors.Wrapf(err, "error in parsing MountPath")
}
- containerConfig.Volumes = append(containerConfig.Volumes, fmt.Sprintf("%s:%s", hostPath, volume.MountPath))
+ if volume.ReadOnly {
+ readonly = ":ro"
+ }
+ containerConfig.Volumes = append(containerConfig.Volumes, fmt.Sprintf("%s:%s%s", hostPath, volume.MountPath, readonly))
}
return &containerConfig, nil
}
diff --git a/pkg/domain/infra/abi/system.go b/pkg/domain/infra/abi/system.go
index ff1052d86..914a7681d 100644
--- a/pkg/domain/infra/abi/system.go
+++ b/pkg/domain/infra/abi/system.go
@@ -313,6 +313,7 @@ func (ic *ContainerEngine) SystemDf(ctx context.Context, options entities.System
}
dfVolumes := make([]*entities.SystemDfVolumeReport, 0, len(vols))
+ var reclaimableSize int64
for _, v := range vols {
var consInUse int
volSize, err := sizeOfPath(v.MountPoint())
@@ -323,15 +324,19 @@ func (ic *ContainerEngine) SystemDf(ctx context.Context, options entities.System
if err != nil {
return nil, err
}
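+ // A volume that no container references is reclaimable.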
+ if len(inUse) == 0 {
+ reclaimableSize += volSize
+ }
for _, viu := range inUse {
if util.StringInSlice(viu, runningContainers) {
consInUse++
}
}
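+ // ReclaimableSize carries the running total across volumes, not a per-volume value.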
report := entities.SystemDfVolumeReport{
- VolumeName: v.Name(),
- Links: consInUse,
- Size: volSize,
+ VolumeName: v.Name(),
+ Links: consInUse,
+ Size: volSize,
+ ReclaimableSize: reclaimableSize,
}
dfVolumes = append(dfVolumes, &report)
}
diff --git a/pkg/domain/infra/tunnel/containers.go b/pkg/domain/infra/tunnel/containers.go
index cc919561f..062b38a70 100644
--- a/pkg/domain/infra/tunnel/containers.go
+++ b/pkg/domain/infra/tunnel/containers.go
@@ -8,11 +8,13 @@ import (
"os"
"strconv"
"strings"
+ "sync"
"time"
"github.com/containers/common/pkg/config"
"github.com/containers/image/v5/docker/reference"
"github.com/containers/podman/v2/libpod/define"
+ "github.com/containers/podman/v2/libpod/events"
"github.com/containers/podman/v2/pkg/api/handlers"
"github.com/containers/podman/v2/pkg/bindings"
"github.com/containers/podman/v2/pkg/bindings/containers"
@@ -507,33 +509,90 @@ func (ic *ContainerEngine) ContainerRun(ctx context.Context, opts entities.Conta
for _, w := range con.Warnings {
fmt.Fprintf(os.Stderr, "%s\n", w)
}
+
report := entities.ContainerRunReport{Id: con.ID}
- // Attach
- if !opts.Detach {
- err = startAndAttach(ic, con.ID, &opts.DetachKeys, opts.InputStream, opts.OutputStream, opts.ErrorStream)
- if err == nil {
- exitCode, err := containers.Wait(ic.ClientCxt, con.ID, nil)
- if err == nil {
- report.ExitCode = int(exitCode)
- }
+
+ if opts.Detach {
+ // Detach and return early
+ err := containers.Start(ic.ClientCxt, con.ID, nil)
+ if err != nil {
+ report.ExitCode = define.ExitCode(err)
}
- } else {
- err = containers.Start(ic.ClientCxt, con.ID, nil)
+ return &report, err
}
- if err != nil {
+
+ // Attach
+ if err := startAndAttach(ic, con.ID, &opts.DetachKeys, opts.InputStream, opts.OutputStream, opts.ErrorStream); err != nil {
report.ExitCode = define.ExitCode(err)
+ if opts.Rm {
+ if rmErr := containers.Remove(ic.ClientCxt, con.ID, bindings.PFalse, bindings.PTrue); rmErr != nil {
+ logrus.Debugf("unable to remove container %s after failing to start and attach to it: %v", con.ID, rmErr)
+ }
+ }
+ return &report, err
}
+
if opts.Rm {
- if err := containers.Remove(ic.ClientCxt, con.ID, bindings.PFalse, bindings.PTrue); err != nil {
- if errors.Cause(err) == define.ErrNoSuchCtr ||
- errors.Cause(err) == define.ErrCtrRemoved {
- logrus.Warnf("Container %s does not exist: %v", con.ID, err)
- } else {
- logrus.Errorf("Error removing container %s: %v", con.ID, err)
+ // Defer the removal, so we can return early if needed and
+ // de-spaghetti the code.
+ defer func() {
+ if err := containers.Remove(ic.ClientCxt, con.ID, bindings.PFalse, bindings.PTrue); err != nil {
+ if errors.Cause(err) == define.ErrNoSuchCtr ||
+ errors.Cause(err) == define.ErrCtrRemoved {
+ logrus.Warnf("Container %s does not exist: %v", con.ID, err)
+ } else {
+ logrus.Errorf("Error removing container %s: %v", con.ID, err)
+ }
}
+ }()
+ }
+
+ // Wait
+ exitCode, waitErr := containers.Wait(ic.ClientCxt, con.ID, nil)
+ if waitErr == nil {
+ report.ExitCode = int(exitCode)
+ return &report, nil
+ }
+
+ // Determine why the wait failed. If the container doesn't exist,
+ // consult the events.
+ if !strings.Contains(waitErr.Error(), define.ErrNoSuchCtr.Error()) {
+ return &report, waitErr
+ }
+
+ // Events
+ eventsChannel := make(chan *events.Event)
+ eventOptions := entities.EventsOptions{
+ EventChan: eventsChannel,
+ Filter: []string{
+ "type=container",
+ fmt.Sprintf("container=%s", con.ID),
+ fmt.Sprintf("event=%s", events.Exited),
+ },
+ }
+
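+ // The mutex acts as a done signal: locked before the reader goroutine starts and
+ // unlocked once the events channel closes, so the Lock below blocks until all
+ // events have been read.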
+ var lastEvent *events.Event
+ var mutex sync.Mutex
+ mutex.Lock()
+ // Read the events.
+ go func() {
+ for e := range eventsChannel {
+ lastEvent = e
}
+ mutex.Unlock()
+ }()
+
+ eventsErr := ic.Events(ctx, eventOptions)
+
+ // Wait for all events to be read
+ mutex.Lock()
+ if eventsErr != nil || lastEvent == nil {
+ logrus.Errorf("Cannot get exit code: %v", eventsErr)
+ report.ExitCode = define.ExecErrorCodeNotFound
+ return &report, nil // compat with local client
}
+ report.ExitCode = lastEvent.ContainerExitCode
+ return &report, nil
}
diff --git a/pkg/domain/infra/tunnel/generate.go b/pkg/domain/infra/tunnel/generate.go
index c7d5cd9e2..966f707b1 100644
--- a/pkg/domain/infra/tunnel/generate.go
+++ b/pkg/domain/infra/tunnel/generate.go
@@ -5,11 +5,10 @@ import (
"github.com/containers/podman/v2/pkg/bindings/generate"
"github.com/containers/podman/v2/pkg/domain/entities"
- "github.com/pkg/errors"
)
func (ic *ContainerEngine) GenerateSystemd(ctx context.Context, nameOrID string, options entities.GenerateSystemdOptions) (*entities.GenerateSystemdReport, error) {
- return nil, errors.New("not implemented for tunnel")
+ return generate.Systemd(ic.ClientCxt, nameOrID, options)
}
func (ic *ContainerEngine) GenerateKube(ctx context.Context, nameOrID string, options entities.GenerateKubeOptions) (*entities.GenerateKubeReport, error) {
diff --git a/pkg/domain/infra/tunnel/images.go b/pkg/domain/infra/tunnel/images.go
index b255c5da4..185cc2f9a 100644
--- a/pkg/domain/infra/tunnel/images.go
+++ b/pkg/domain/infra/tunnel/images.go
@@ -251,12 +251,23 @@ func (ir *ImageEngine) Save(ctx context.Context, nameOrID string, tags []string,
return err
}
- exErr := images.Export(ir.ClientCxt, nameOrID, f, &options.Format, &options.Compress)
- if err := f.Close(); err != nil {
- return err
- }
- if exErr != nil {
- return exErr
+ if options.MultiImageArchive {
+ exErr := images.MultiExport(ir.ClientCxt, append([]string{nameOrID}, tags...), f, &options.Format, &options.Compress)
+ if err := f.Close(); err != nil {
+ return err
+ }
+ if exErr != nil {
+ return exErr
+ }
+ } else {
+ // FIXME: tags are entirely ignored here but shouldn't.
+ exErr := images.Export(ir.ClientCxt, nameOrID, f, &options.Format, &options.Compress)
+ if err := f.Close(); err != nil {
+ return err
+ }
+ if exErr != nil {
+ return exErr
+ }
}
if options.Format != "oci-dir" && options.Format != "docker-dir" {
diff --git a/pkg/domain/infra/tunnel/network.go b/pkg/domain/infra/tunnel/network.go
index 2b197cac0..074425087 100644
--- a/pkg/domain/infra/tunnel/network.go
+++ b/pkg/domain/infra/tunnel/network.go
@@ -8,7 +8,7 @@ import (
)
func (ic *ContainerEngine) NetworkList(ctx context.Context, options entities.NetworkListOptions) ([]*entities.NetworkListReport, error) {
- return network.List(ic.ClientCxt)
+ return network.List(ic.ClientCxt, options)
}
func (ic *ContainerEngine) NetworkInspect(ctx context.Context, namesOrIds []string, options entities.NetworkInspectOptions) ([]entities.NetworkInspectReport, error) {
diff --git a/pkg/network/files.go b/pkg/network/files.go
index 38ce38b97..a2090491f 100644
--- a/pkg/network/files.go
+++ b/pkg/network/files.go
@@ -14,11 +14,16 @@ import (
"github.com/pkg/errors"
)
-func GetCNIConfDir(config *config.Config) string {
- if len(config.Network.NetworkConfigDir) < 1 {
- return CNIConfigDir
+func GetCNIConfDir(configArg *config.Config) string {
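+ // The parameter is named configArg so it does not shadow the config package,
+ // which DefaultConfig below comes from.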
+ if len(configArg.Network.NetworkConfigDir) < 1 {
+ dc, err := config.DefaultConfig()
+ if err != nil {
+ // Fallback to hard-coded dir
+ return CNIConfigDir
+ }
+ return dc.Network.NetworkConfigDir
}
- return config.Network.NetworkConfigDir
+ return configArg.Network.NetworkConfigDir
}
// LoadCNIConfsFromDir loads all the CNI configurations from a dir
diff --git a/pkg/ps/ps.go b/pkg/ps/ps.go
index 4c5f60844..8087507e2 100644
--- a/pkg/ps/ps.go
+++ b/pkg/ps/ps.go
@@ -14,6 +14,7 @@ import (
lpfilters "github.com/containers/podman/v2/libpod/filters"
"github.com/containers/podman/v2/pkg/domain/entities"
psdefine "github.com/containers/podman/v2/pkg/ps/define"
+ "github.com/containers/storage"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -54,12 +55,12 @@ func GetContainerLists(runtime *libpod.Runtime, options entities.ContainerListOp
return nil, err
}
if options.Last > 0 {
- // Sort the containers we got
+ // Sort the libpod containers
sort.Sort(SortCreateTime{SortContainers: cons})
// we should perform the truncation before we start gathering
// the expensive information on containers
if options.Last < len(cons) {
- cons = cons[len(cons)-options.Last:]
+ cons = cons[:options.Last]
}
}
for _, con := range cons {
@@ -68,7 +69,31 @@ func GetContainerLists(runtime *libpod.Runtime, options entities.ContainerListOp
return nil, err
}
pss = append(pss, listCon)
+ }
+
+ if options.All && options.Storage {
+ externCons, err := runtime.StorageContainers()
+ if err != nil {
+ return nil, err
+ }
+
+ for _, con := range externCons {
+ listCon, err := ListStorageContainer(runtime, con, options)
+ if err != nil {
+ return nil, err
+ }
+ pss = append(pss, listCon)
+ }
+ }
+
+ // Sort the containers we got
+ sort.Sort(SortPSCreateTime{SortPSContainers: pss})
+ if options.Last > 0 {
+ // only return the "last" containers caller requested
+ if options.Last < len(pss) {
+ pss = pss[:options.Last]
+ }
}
return pss, nil
}
@@ -199,6 +224,48 @@ func ListContainerBatch(rt *libpod.Runtime, ctr *libpod.Container, opts entities
return ps, nil
}
+func ListStorageContainer(rt *libpod.Runtime, ctr storage.Container, opts entities.ContainerListOptions) (entities.ListContainer, error) {
+ name := "unknown"
+ if len(ctr.Names) > 0 {
+ name = ctr.Names[0]
+ }
+
+ ps := entities.ListContainer{
+ ID: ctr.ID,
+ Created: ctr.Created.Unix(),
+ ImageID: ctr.ImageID,
+ State: "storage",
+ Names: []string{name},
+ }
+
+ buildahCtr, err := rt.IsBuildahContainer(ctr.ID)
+ if err != nil {
+ return ps, errors.Wrapf(err, "error determining buildah container for container %s", ctr.ID)
+ }
+
+ if buildahCtr {
+ ps.Command = []string{"buildah"}
+ } else {
+ ps.Command = []string{"storage"}
+ }
+
+ imageName := ""
+ if ctr.ImageID != "" {
+ names, err := rt.ImageRuntime().ImageNames(ctr.ImageID)
+ if err != nil {
+ return ps, err
+ }
+ if len(names) > 0 {
+ imageName = names[0]
+ }
+ } else if buildahCtr {
+ imageName = "scratch"
+ }
+
+ ps.Image = imageName
+ return ps, nil
+}
+
func getNamespaceInfo(path string) (string, error) {
val, err := os.Readlink(path)
if err != nil {
@@ -223,5 +290,17 @@ func (a SortContainers) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
type SortCreateTime struct{ SortContainers }
func (a SortCreateTime) Less(i, j int) bool {
- return a.SortContainers[i].CreatedTime().Before(a.SortContainers[j].CreatedTime())
+ return a.SortContainers[i].CreatedTime().After(a.SortContainers[j].CreatedTime())
+}
+
+// SortPSContainers helps us set-up ability to sort by createTime
+type SortPSContainers []entities.ListContainer
+
+func (a SortPSContainers) Len() int { return len(a) }
+func (a SortPSContainers) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
+
+type SortPSCreateTime struct{ SortPSContainers }
+
+func (a SortPSCreateTime) Less(i, j int) bool {
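+ // Larger Unix timestamps (newer containers) sort first, matching SortCreateTime above.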
+ return a.SortPSContainers[i].Created > a.SortPSContainers[j].Created
}
diff --git a/pkg/specgen/generate/oci.go b/pkg/specgen/generate/oci.go
index fd324c6e1..b57ddf1aa 100644
--- a/pkg/specgen/generate/oci.go
+++ b/pkg/specgen/generate/oci.go
@@ -353,6 +353,9 @@ func SpecGenToOCI(ctx context.Context, s *specgen.SpecGenerator, rt *libpod.Runt
configSpec.Annotations[define.InspectAnnotationInit] = define.InspectResponseFalse
}
+ if s.OOMScoreAdj != nil {
+ g.SetProcessOOMScoreAdj(*s.OOMScoreAdj)
+ }
setProcOpts(s, &g)
return configSpec, nil
diff --git a/pkg/specgen/generate/security.go b/pkg/specgen/generate/security.go
index d3e3d9278..87e8029a7 100644
--- a/pkg/specgen/generate/security.go
+++ b/pkg/specgen/generate/security.go
@@ -60,7 +60,7 @@ func setLabelOpts(s *specgen.SpecGenerator, runtime *libpod.Runtime, pidConfig s
func setupApparmor(s *specgen.SpecGenerator, rtc *config.Config, g *generate.Generator) error {
hasProfile := len(s.ApparmorProfile) > 0
if !apparmor.IsEnabled() {
- if hasProfile {
+ if hasProfile && s.ApparmorProfile != "unconfined" {
return errors.Errorf("Apparmor profile %q specified, but Apparmor is not enabled on this system", s.ApparmorProfile)
}
return nil
diff --git a/pkg/systemd/generate/containers.go b/pkg/systemd/generate/containers.go
index 5f6376977..a4fdae46e 100644
--- a/pkg/systemd/generate/containers.go
+++ b/pkg/systemd/generate/containers.go
@@ -3,9 +3,7 @@ package generate
import (
"bytes"
"fmt"
- "io/ioutil"
"os"
- "path/filepath"
"sort"
"strings"
"text/template"
@@ -87,17 +85,22 @@ KillMode=none
Type=forking
[Install]
-WantedBy=multi-user.target default.target`
+WantedBy=multi-user.target default.target
+`
-// ContainerUnit generates a systemd unit for the specified container. Based
-// on the options, the return value might be the entire unit or a file it has
-// been written to.
+// ContainerUnit generates a systemd unit for the specified container. It
+// returns the name of the service and the unit's content.
-func ContainerUnit(ctr *libpod.Container, options entities.GenerateSystemdOptions) (string, error) {
+func ContainerUnit(ctr *libpod.Container, options entities.GenerateSystemdOptions) (string, string, error) {
info, err := generateContainerInfo(ctr, options)
if err != nil {
- return "", err
+ return "", "", err
+ }
+ content, err := executeContainerTemplate(info, options)
+ if err != nil {
+ return "", "", err
}
- return executeContainerTemplate(info, options)
+ return info.ServiceName, content, nil
}
func generateContainerInfo(ctr *libpod.Container, options entities.GenerateSystemdOptions) (*containerInfo, error) {
@@ -217,6 +220,9 @@ func executeContainerTemplate(info *containerInfo, options entities.GenerateSyst
case "--replace":
hasReplaceParam = true
}
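+ // Also detect the --name=value form, which the exact-match switch above cannot catch.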
+ if strings.HasPrefix(p, "--name=") {
+ hasNameParam = true
+ }
}
if !hasDetachParam {
@@ -288,18 +294,5 @@ func executeContainerTemplate(info *containerInfo, options entities.GenerateSyst
return "", err
}
- if !options.Files {
- return buf.String(), nil
- }
-
- buf.WriteByte('\n')
- cwd, err := os.Getwd()
- if err != nil {
- return "", errors.Wrap(err, "error getting current working directory")
- }
- path := filepath.Join(cwd, fmt.Sprintf("%s.service", info.ServiceName))
- if err := ioutil.WriteFile(path, buf.Bytes(), 0644); err != nil {
- return "", errors.Wrap(err, "error generating systemd unit")
- }
- return path, nil
+ return buf.String(), nil
}
diff --git a/pkg/systemd/generate/containers_test.go b/pkg/systemd/generate/containers_test.go
index b5c736c5a..d27062ef3 100644
--- a/pkg/systemd/generate/containers_test.go
+++ b/pkg/systemd/generate/containers_test.go
@@ -56,7 +56,8 @@ KillMode=none
Type=forking
[Install]
-WantedBy=multi-user.target default.target`
+WantedBy=multi-user.target default.target
+`
goodName := `# container-foobar.service
# autogenerated by Podman CI
@@ -78,7 +79,8 @@ KillMode=none
Type=forking
[Install]
-WantedBy=multi-user.target default.target`
+WantedBy=multi-user.target default.target
+`
goodNameBoundTo := `# container-foobar.service
# autogenerated by Podman CI
@@ -102,7 +104,8 @@ KillMode=none
Type=forking
[Install]
-WantedBy=multi-user.target default.target`
+WantedBy=multi-user.target default.target
+`
goodWithNameAndGeneric := `# jadda-jadda.service
# autogenerated by Podman CI
@@ -125,7 +128,8 @@ KillMode=none
Type=forking
[Install]
-WantedBy=multi-user.target default.target`
+WantedBy=multi-user.target default.target
+`
goodWithExplicitShortDetachParam := `# jadda-jadda.service
# autogenerated by Podman CI
@@ -148,7 +152,8 @@ KillMode=none
Type=forking
[Install]
-WantedBy=multi-user.target default.target`
+WantedBy=multi-user.target default.target
+`
goodNameNewWithPodFile := `# jadda-jadda.service
# autogenerated by Podman CI
@@ -171,7 +176,8 @@ KillMode=none
Type=forking
[Install]
-WantedBy=multi-user.target default.target`
+WantedBy=multi-user.target default.target
+`
goodNameNewDetach := `# jadda-jadda.service
# autogenerated by Podman CI
@@ -194,7 +200,8 @@ KillMode=none
Type=forking
[Install]
-WantedBy=multi-user.target default.target`
+WantedBy=multi-user.target default.target
+`
goodIDNew := `# container-639c53578af4d84b8800b4635fa4e680ee80fd67e0e6a2d4eea48d1e3230f401.service
# autogenerated by Podman CI
@@ -217,7 +224,8 @@ KillMode=none
Type=forking
[Install]
-WantedBy=multi-user.target default.target`
+WantedBy=multi-user.target default.target
+`
tests := []struct {
name string
@@ -375,8 +383,7 @@ WantedBy=multi-user.target default.target`
test := tt
t.Run(tt.name, func(t *testing.T) {
opts := entities.GenerateSystemdOptions{
- Files: false,
- New: test.new,
+ New: test.new,
}
got, err := executeContainerTemplate(&test.info, opts)
if (err != nil) != test.wantErr {
diff --git a/pkg/systemd/generate/pods.go b/pkg/systemd/generate/pods.go
index dec9587d9..c41eedd17 100644
--- a/pkg/systemd/generate/pods.go
+++ b/pkg/systemd/generate/pods.go
@@ -3,9 +3,7 @@ package generate
import (
"bytes"
"fmt"
- "io/ioutil"
"os"
- "path/filepath"
"sort"
"strings"
"text/template"
@@ -88,39 +86,40 @@ KillMode=none
Type=forking
[Install]
-WantedBy=multi-user.target default.target`
+WantedBy=multi-user.target default.target
+`
// PodUnits generates systemd units for the specified pod and its containers.
-// Based on the options, the return value might be the content of all units or
-// the files they been written to.
+// It returns a map from unit name to unit content.
-func PodUnits(pod *libpod.Pod, options entities.GenerateSystemdOptions) (string, error) {
+func PodUnits(pod *libpod.Pod, options entities.GenerateSystemdOptions) (map[string]string, error) {
// Error out if the pod has no infra container, which we require to be the
// main service.
if !pod.HasInfraContainer() {
- return "", errors.Errorf("error generating systemd unit files: Pod %q has no infra container", pod.Name())
+ return nil, errors.Errorf("error generating systemd unit files: Pod %q has no infra container", pod.Name())
}
podInfo, err := generatePodInfo(pod, options)
if err != nil {
- return "", err
+ return nil, err
}
infraID, err := pod.InfraContainerID()
if err != nil {
- return "", err
+ return nil, err
}
// Compute the container-dependency graph for the Pod.
containers, err := pod.AllContainers()
if err != nil {
- return "", err
+ return nil, err
}
if len(containers) == 0 {
- return "", errors.Errorf("error generating systemd unit files: Pod %q has no containers", pod.Name())
+ return nil, errors.Errorf("error generating systemd unit files: Pod %q has no containers", pod.Name())
}
graph, err := libpod.BuildContainerGraph(containers)
if err != nil {
- return "", err
+ return nil, err
}
// Traverse the dependency graph and create systemdgen.containerInfo's for
@@ -133,7 +132,7 @@ func PodUnits(pod *libpod.Pod, options entities.GenerateSystemdOptions) (string,
}
ctrInfo, err := generateContainerInfo(ctr, options)
if err != nil {
- return "", err
+ return nil, err
}
// Now add the container's dependencies and add the container as a
// required service of the infra container.
@@ -149,24 +148,23 @@ func PodUnits(pod *libpod.Pod, options entities.GenerateSystemdOptions) (string,
containerInfos = append(containerInfos, ctrInfo)
}
+ units := map[string]string{}
// Now generate the systemd service for all containers.
- builder := strings.Builder{}
out, err := executePodTemplate(podInfo, options)
if err != nil {
- return "", err
+ return nil, err
}
- builder.WriteString(out)
+ units[podInfo.ServiceName] = out
for _, info := range containerInfos {
info.pod = podInfo
- builder.WriteByte('\n')
out, err := executeContainerTemplate(info, options)
if err != nil {
- return "", err
+ return nil, err
}
- builder.WriteString(out)
+ units[info.ServiceName] = out
}
- return builder.String(), nil
+ return units, nil
}
func generatePodInfo(pod *libpod.Pod, options entities.GenerateSystemdOptions) (*podInfo, error) {
@@ -339,18 +337,5 @@ func executePodTemplate(info *podInfo, options entities.GenerateSystemdOptions)
return "", err
}
- if !options.Files {
- return buf.String(), nil
- }
-
- buf.WriteByte('\n')
- cwd, err := os.Getwd()
- if err != nil {
- return "", errors.Wrap(err, "error getting current working directory")
- }
- path := filepath.Join(cwd, fmt.Sprintf("%s.service", info.ServiceName))
- if err := ioutil.WriteFile(path, buf.Bytes(), 0644); err != nil {
- return "", errors.Wrap(err, "error generating systemd unit")
- }
- return path, nil
+ return buf.String(), nil
}
diff --git a/pkg/systemd/generate/pods_test.go b/pkg/systemd/generate/pods_test.go
index 8bf4705a7..7f1f63b7e 100644
--- a/pkg/systemd/generate/pods_test.go
+++ b/pkg/systemd/generate/pods_test.go
@@ -58,7 +58,8 @@ KillMode=none
Type=forking
[Install]
-WantedBy=multi-user.target default.target`
+WantedBy=multi-user.target default.target
+`
podGoodNamedNew := `# pod-123abc.service
# autogenerated by Podman CI
@@ -84,7 +85,8 @@ KillMode=none
Type=forking
[Install]
-WantedBy=multi-user.target default.target`
+WantedBy=multi-user.target default.target
+`
tests := []struct {
name string
@@ -130,8 +132,7 @@ WantedBy=multi-user.target default.target`
test := tt
t.Run(tt.name, func(t *testing.T) {
opts := entities.GenerateSystemdOptions{
- Files: false,
- New: test.new,
+ New: test.new,
}
got, err := executePodTemplate(&test.info, opts)
if (err != nil) != test.wantErr {
diff --git a/rootless.md b/rootless.md
index 196ed52c3..22b03e340 100644
--- a/rootless.md
+++ b/rootless.md
@@ -28,9 +28,6 @@ can easily fail
* Cannot use the overlayfs driver, but fuse-overlayfs is supported
* Ubuntu supports non-root overlay, but no other Linux distros do.
* The only other supported driver is VFS.
-* No CNI Support
- * CNI wants to modify IPTables, plus other network manipulation that requires CAP_SYS_ADMIN.
- * There is potential we could probably do some sort of denylisting of the relevant plugins, and add a new plugin for rootless networking - slirp4netns as one example and there may be others
* Cannot use ping out of the box.
* [(Can be fixed by setting sysctl on host)](https://github.com/containers/podman/blob/master/troubleshooting.md#6-rootless-containers-cannot-ping-hosts)
* Requires newer shadow-utils (not found in older RHEL7/CentOS7 distros; should be fixed in the RHEL 7.7 release)
diff --git a/test/apiv2/35-networks.at b/test/apiv2/35-networks.at
index 4c032c072..143d6c07b 100644
--- a/test/apiv2/35-networks.at
+++ b/test/apiv2/35-networks.at
@@ -21,6 +21,27 @@ if root; then
t POST libpod/networks/create '"Subnet":{"IP":"10.10.1.0","Mask":[0,255,255,0]}' 500 \
.cause~'.*mask is invalid'
+ # network list
+ t GET libpod/networks/json 200
+ t GET libpod/networks/json?filter=name=network1 200 \
+ length=1 \
+ .[0].Name=network1
+ t GET networks 200
+
+ # network list, docker endpoint
+ # filters={"name":["network1","network2"]}
+ t GET networks?filters=%7B%22name%22%3A%5B%22network1%22%2C%22network2%22%5D%7D 200 \
+ length=2
+ # filters={"name":["network"]}
+ t GET networks?filters=%7B%22name%22%3A%5B%22network%22%5D%7D 200 \
+ length=2
+ # invalid filter filters={"label":["abc"]}
+ t GET networks?filters=%7B%22label%22%3A%5B%22abc%22%5D%7D 500 \
+ .cause="only the name filter for listing networks is implemented"
+ # invalid filter filters={"label":"abc","name":["network"]}
+ t GET networks?filters=%7B%22label%22%3A%22abc%22%2C%22name%22%3A%5B%22network%22%5D%7D 500 \
+ .cause="only the name filter for listing networks is implemented"
+
# clean the network
t DELETE libpod/networks/network1 200 \
.[0].Name~network1 \
diff --git a/test/apiv2/40-pods.at b/test/apiv2/40-pods.at
index 3df541de5..fdb61a84d 100644
--- a/test/apiv2/40-pods.at
+++ b/test/apiv2/40-pods.at
@@ -102,6 +102,10 @@ t GET libpod/pods/stats?namesOrIDs=fakename 404 \
t DELETE libpod/pods/bar?force=true 200
+# test a fake (nonexistent) pod name
+t GET libpod/pods/fakename/top 404 \
+ .cause="no such pod"
+
t GET libpod/pods/foo/top 200 \
.Processes[0][-1]="/pause " \
.Titles[-1]="COMMAND"
diff --git a/test/e2e/build_test.go b/test/e2e/build_test.go
index 9fd82e149..0b6e919d0 100644
--- a/test/e2e/build_test.go
+++ b/test/e2e/build_test.go
@@ -57,6 +57,29 @@ var _ = Describe("Podman build", func() {
Expect(session.ExitCode()).To(Equal(0))
})
+ It("podman build with logfile", func() {
+ SkipIfRemote()
+ logfile := filepath.Join(podmanTest.TempDir, "logfile")
+ session := podmanTest.PodmanNoCache([]string{"build", "--tag", "test", "--logfile", logfile, "build/basicalpine"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+
+ // Verify that OS and Arch are being set
+ inspect := podmanTest.PodmanNoCache([]string{"inspect", "test"})
+ inspect.WaitWithDefaultTimeout()
+ data := inspect.InspectImageJSON()
+ Expect(data[0].Os).To(Equal(runtime.GOOS))
+ Expect(data[0].Architecture).To(Equal(runtime.GOARCH))
+
+ st, err := os.Stat(logfile)
+ Expect(err).To(BeNil())
+ Expect(st.Size()).To(Not(Equal(0)))
+
+ session = podmanTest.PodmanNoCache([]string{"rmi", "alpine"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+ })
+
// If the context directory is pointing at a file and not a directory,
// that's a no no, fail out.
It("podman build context directory a file", func() {
diff --git a/test/e2e/common_test.go b/test/e2e/common_test.go
index ed55484e3..b6bbae15b 100644
--- a/test/e2e/common_test.go
+++ b/test/e2e/common_test.go
@@ -245,6 +245,12 @@ func PodmanTestCreateUtil(tempDir string, remote bool) *PodmanTestIntegration {
}
os.Setenv("DISABLE_HC_SYSTEMD", "true")
CNIConfigDir := "/etc/cni/net.d"
+ if rootless.IsRootless() {
+ CNIConfigDir = filepath.Join(os.Getenv("HOME"), ".config/cni/net.d")
+ }
+ if err := os.MkdirAll(CNIConfigDir, 0755); err != nil {
+ panic(err)
+ }
storageFs := STORAGE_FS
if rootless.IsRootless() {
diff --git a/test/e2e/generate_kube_test.go b/test/e2e/generate_kube_test.go
index 3c3fb5a4d..e886c6000 100644
--- a/test/e2e/generate_kube_test.go
+++ b/test/e2e/generate_kube_test.go
@@ -348,4 +348,33 @@ var _ = Describe("Podman generate kube", func() {
Expect(inspect.ExitCode()).To(Equal(0))
Expect(inspect.OutputToString()).To(ContainSubstring(vol1))
})
+
+ It("podman generate kube sharing pid namespace", func() {
+ podName := "test"
+ podSession := podmanTest.Podman([]string{"pod", "create", "--name", podName, "--share", "pid"})
+ podSession.WaitWithDefaultTimeout()
+ Expect(podSession.ExitCode()).To(Equal(0))
+
+ session := podmanTest.Podman([]string{"create", "--pod", podName, "--name", "test1", ALPINE, "top"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+
+ outputFile := filepath.Join(podmanTest.RunRoot, "pod.yaml")
+ kube := podmanTest.Podman([]string{"generate", "kube", podName, "-f", outputFile})
+ kube.WaitWithDefaultTimeout()
+ Expect(kube.ExitCode()).To(Equal(0))
+
+ rm := podmanTest.Podman([]string{"pod", "rm", "-f", podName})
+ rm.WaitWithDefaultTimeout()
+ Expect(rm.ExitCode()).To(Equal(0))
+
+ play := podmanTest.Podman([]string{"play", "kube", outputFile})
+ play.WaitWithDefaultTimeout()
+ Expect(play.ExitCode()).To(Equal(0))
+
+ inspect := podmanTest.Podman([]string{"pod", "inspect", podName})
+ inspect.WaitWithDefaultTimeout()
+ Expect(inspect.ExitCode()).To(Equal(0))
+ Expect(inspect.OutputToString()).To(ContainSubstring(`"pid"`))
+ })
})
diff --git a/test/e2e/generate_systemd_test.go b/test/e2e/generate_systemd_test.go
index 60d9162d1..da2f67754 100644
--- a/test/e2e/generate_systemd_test.go
+++ b/test/e2e/generate_systemd_test.go
@@ -1,5 +1,3 @@
-// +build !remote
-
package integration
import (
@@ -61,7 +59,7 @@ var _ = Describe("Podman generate systemd", func() {
session = podmanTest.Podman([]string{"generate", "systemd", "--restart-policy", "bogus", "foobar"})
session.WaitWithDefaultTimeout()
Expect(session).To(ExitWithError())
- found, _ := session.ErrorGrepString("Error: bogus is not a valid restart policy")
+ found, _ := session.ErrorGrepString("bogus is not a valid restart policy")
Expect(found).Should(BeTrue())
})
@@ -191,7 +189,7 @@ var _ = Describe("Podman generate systemd", func() {
Expect(found).To(BeTrue())
})
- It("podman generate systemd --new", func() {
+ It("podman generate systemd --new --name foo", func() {
n := podmanTest.Podman([]string{"create", "--name", "foo", "alpine", "top"})
n.WaitWithDefaultTimeout()
Expect(n.ExitCode()).To(Equal(0))
@@ -204,6 +202,29 @@ var _ = Describe("Podman generate systemd", func() {
found, _ := session.GrepString("# container-foo.service")
Expect(found).To(BeTrue())
+ found, _ = session.GrepString(" --replace ")
+ Expect(found).To(BeTrue())
+
+ found, _ = session.GrepString("stop --ignore --cidfile %t/container-foo.ctr-id -t 42")
+ Expect(found).To(BeTrue())
+ })
+
+ It("podman generate systemd --new --name=foo", func() {
+ n := podmanTest.Podman([]string{"create", "--name=foo", "alpine", "top"})
+ n.WaitWithDefaultTimeout()
+ Expect(n.ExitCode()).To(Equal(0))
+
+ session := podmanTest.Podman([]string{"generate", "systemd", "-t", "42", "--name", "--new", "foo"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+
+ // Grepping the output (in addition to unit tests)
+ found, _ := session.GrepString("# container-foo.service")
+ Expect(found).To(BeTrue())
+
+ found, _ = session.GrepString(" --replace ")
+ Expect(found).To(BeTrue())
+
found, _ = session.GrepString("stop --ignore --cidfile %t/container-foo.ctr-id -t 42")
Expect(found).To(BeTrue())
})
@@ -383,4 +404,15 @@ var _ = Describe("Podman generate systemd", func() {
found, _ = session.GrepString("pod rm --ignore -f --pod-id-file %t/pod-foo.pod-id")
Expect(found).To(BeTrue())
})
+
+ It("podman generate systemd --format json", func() {
+ n := podmanTest.Podman([]string{"create", "--name", "foo", ALPINE})
+ n.WaitWithDefaultTimeout()
+ Expect(n.ExitCode()).To(Equal(0))
+
+ session := podmanTest.Podman([]string{"generate", "systemd", "--format", "json", "foo"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+ Expect(session.IsJSONOutputValid()).To(BeTrue())
+ })
})
diff --git a/test/e2e/load_test.go b/test/e2e/load_test.go
index 6a7f15e1f..2b401a09d 100644
--- a/test/e2e/load_test.go
+++ b/test/e2e/load_test.go
@@ -269,4 +269,12 @@ var _ = Describe("Podman load", func() {
result.WaitWithDefaultTimeout()
Expect(result.ExitCode()).To(Equal(0))
})
+
+ It("podman load multi-image archive", func() {
+ result := podmanTest.PodmanNoCache([]string{"load", "-i", "./testdata/image/docker-two-images.tar.xz"})
+ result.WaitWithDefaultTimeout()
+ Expect(result.ExitCode()).To(Equal(0))
+ Expect(result.LineInOutputContains("example.com/empty:latest")).To(BeTrue())
+ Expect(result.LineInOutputContains("example.com/empty/but:different")).To(BeTrue())
+ })
})
diff --git a/test/e2e/network_create_test.go b/test/e2e/network_create_test.go
index f97e6c1f1..13d515d8e 100644
--- a/test/e2e/network_create_test.go
+++ b/test/e2e/network_create_test.go
@@ -74,7 +74,6 @@ var _ = Describe("Podman network create", func() {
)
BeforeEach(func() {
- SkipIfRootless()
tempdir, err = CreateTempDirInTempDir()
if err != nil {
os.Exit(1)
@@ -180,6 +179,7 @@ var _ = Describe("Podman network create", func() {
It("podman network create with name and IPv6 subnet", func() {
SkipIfRemote()
+ SkipIfRootless()
var (
results []network.NcList
)
diff --git a/test/e2e/network_test.go b/test/e2e/network_test.go
index f427afa67..c35b82fc1 100644
--- a/test/e2e/network_test.go
+++ b/test/e2e/network_test.go
@@ -1,5 +1,3 @@
-// +build !remote
-
package integration
import (
@@ -9,6 +7,7 @@ import (
"path/filepath"
"strings"
+ "github.com/containers/podman/v2/pkg/rootless"
. "github.com/containers/podman/v2/test/utils"
"github.com/containers/storage/pkg/stringid"
. "github.com/onsi/ginkgo"
@@ -34,7 +33,6 @@ var _ = Describe("Podman network", func() {
)
BeforeEach(func() {
- SkipIfRootless()
tempdir, err = CreateTempDirInTempDir()
if err != nil {
os.Exit(1)
@@ -76,13 +74,12 @@ var _ = Describe("Podman network", func() {
}
]
}`
- cniPath = "/etc/cni/net.d"
)
It("podman network list", func() {
// Setup, use uuid to prevent conflict with other tests
uuid := stringid.GenerateNonCryptoID()
- secondPath := filepath.Join(cniPath, fmt.Sprintf("%s.conflist", uuid))
+ secondPath := filepath.Join(podmanTest.CNIConfigDir, fmt.Sprintf("%s.conflist", uuid))
writeConf([]byte(secondConf), secondPath)
defer removeConf(secondPath)
@@ -95,7 +92,7 @@ var _ = Describe("Podman network", func() {
It("podman network list -q", func() {
// Setup, use uuid to prevent conflict with other tests
uuid := stringid.GenerateNonCryptoID()
- secondPath := filepath.Join(cniPath, fmt.Sprintf("%s.conflist", uuid))
+ secondPath := filepath.Join(podmanTest.CNIConfigDir, fmt.Sprintf("%s.conflist", uuid))
writeConf([]byte(secondConf), secondPath)
defer removeConf(secondPath)
@@ -108,7 +105,7 @@ var _ = Describe("Podman network", func() {
It("podman network list --filter success", func() {
// Setup, use uuid to prevent conflict with other tests
uuid := stringid.GenerateNonCryptoID()
- secondPath := filepath.Join(cniPath, fmt.Sprintf("%s.conflist", uuid))
+ secondPath := filepath.Join(podmanTest.CNIConfigDir, fmt.Sprintf("%s.conflist", uuid))
writeConf([]byte(secondConf), secondPath)
defer removeConf(secondPath)
@@ -121,7 +118,7 @@ var _ = Describe("Podman network", func() {
It("podman network list --filter failure", func() {
// Setup, use uuid to prevent conflict with other tests
uuid := stringid.GenerateNonCryptoID()
- secondPath := filepath.Join(cniPath, fmt.Sprintf("%s.conflist", uuid))
+ secondPath := filepath.Join(podmanTest.CNIConfigDir, fmt.Sprintf("%s.conflist", uuid))
writeConf([]byte(secondConf), secondPath)
defer removeConf(secondPath)
@@ -140,7 +137,7 @@ var _ = Describe("Podman network", func() {
It("podman network rm", func() {
// Setup, use uuid to prevent conflict with other tests
uuid := stringid.GenerateNonCryptoID()
- secondPath := filepath.Join(cniPath, fmt.Sprintf("%s.conflist", uuid))
+ secondPath := filepath.Join(podmanTest.CNIConfigDir, fmt.Sprintf("%s.conflist", uuid))
writeConf([]byte(secondConf), secondPath)
defer removeConf(secondPath)
@@ -168,11 +165,16 @@ var _ = Describe("Podman network", func() {
It("podman network inspect", func() {
// Setup, use uuid to prevent conflict with other tests
uuid := stringid.GenerateNonCryptoID()
- secondPath := filepath.Join(cniPath, fmt.Sprintf("%s.conflist", uuid))
+ secondPath := filepath.Join(podmanTest.CNIConfigDir, fmt.Sprintf("%s.conflist", uuid))
writeConf([]byte(secondConf), secondPath)
defer removeConf(secondPath)
- session := podmanTest.Podman([]string{"network", "inspect", "podman-integrationtest", "podman"})
+ expectedNetworks := []string{"podman-integrationtest"}
+ if !rootless.IsRootless() {
+ // rootful image contains "podman/cni/87-podman-bridge.conflist" for "podman" network
+ expectedNetworks = append(expectedNetworks, "podman")
+ }
+ session := podmanTest.Podman(append([]string{"network", "inspect"}, expectedNetworks...))
session.WaitWithDefaultTimeout()
Expect(session.ExitCode()).To(Equal(0))
Expect(session.IsJSONOutputValid()).To(BeTrue())
@@ -181,7 +183,7 @@ var _ = Describe("Podman network", func() {
It("podman network inspect", func() {
// Setup, use uuid to prevent conflict with other tests
uuid := stringid.GenerateNonCryptoID()
- secondPath := filepath.Join(cniPath, fmt.Sprintf("%s.conflist", uuid))
+ secondPath := filepath.Join(podmanTest.CNIConfigDir, fmt.Sprintf("%s.conflist", uuid))
writeConf([]byte(secondConf), secondPath)
defer removeConf(secondPath)
diff --git a/test/e2e/play_kube_test.go b/test/e2e/play_kube_test.go
index 121cea017..5e01971cb 100644
--- a/test/e2e/play_kube_test.go
+++ b/test/e2e/play_kube_test.go
@@ -99,6 +99,12 @@ spec:
hostPort: {{ .Port }}
protocol: TCP
workingDir: /
+ volumeMounts:
+ {{ if .VolumeMount }}
+ - name: {{.VolumeName}}
+ mountPath: {{ .VolumeMountPath }}
+ readOnly: {{ .VolumeReadOnly }}
+ {{ end }}
{{ end }}
{{ end }}
{{ end }}
@@ -383,12 +389,16 @@ type Ctr struct {
PullPolicy string
HostIP string
Port string
+ VolumeMount bool
+ VolumeMountPath string
+ VolumeName string
+ VolumeReadOnly bool
}
// getCtr takes a list of ctrOptions and returns a Ctr with sane defaults
// and the configured options
func getCtr(options ...ctrOption) *Ctr {
- c := Ctr{defaultCtrName, defaultCtrImage, defaultCtrCmd, defaultCtrArg, true, false, nil, nil, "", "", ""}
+ c := Ctr{defaultCtrName, defaultCtrImage, defaultCtrCmd, defaultCtrArg, true, false, nil, nil, "", "", "", false, "", "", false}
for _, option := range options {
option(&c)
}
@@ -448,6 +458,15 @@ func withHostIP(ip string, port string) ctrOption {
}
}
+func withVolumeMount(mountPath string, readonly bool) ctrOption {
+ return func(c *Ctr) {
+ c.VolumeMountPath = mountPath
+ c.VolumeName = defaultVolName
+ c.VolumeReadOnly = readonly
+ c.VolumeMount = true
+ }
+}
+
func getCtrNameInPod(pod *Pod) string {
return fmt.Sprintf("%s-%s", pod.Name, defaultCtrName)
}
@@ -1035,4 +1054,27 @@ spec:
kube.WaitWithDefaultTimeout()
Expect(kube.ExitCode()).NotTo(Equal(0))
})
+
+ It("podman play kube test with read only volume", func() {
+ hostPathLocation := filepath.Join(tempdir, "file")
+ f, err := os.Create(hostPathLocation)
+ Expect(err).To(BeNil())
+ f.Close()
+
+ ctr := getCtr(withVolumeMount(hostPathLocation, true), withImage(BB))
+ pod := getPod(withVolume(getVolume("File", hostPathLocation)), withCtr(ctr))
+ err = generatePodKubeYaml(pod, kubeYaml)
+ Expect(err).To(BeNil())
+
+ kube := podmanTest.Podman([]string{"play", "kube", kubeYaml})
+ kube.WaitWithDefaultTimeout()
+ Expect(kube.ExitCode()).To(Equal(0))
+
+ inspect := podmanTest.Podman([]string{"inspect", getCtrNameInPod(pod), "--format", "'{{.HostConfig.Binds}}'"})
+ inspect.WaitWithDefaultTimeout()
+ Expect(inspect.ExitCode()).To(Equal(0))
+
+ correct := fmt.Sprintf("%s:%s:%s", hostPathLocation, hostPathLocation, "ro")
+ Expect(inspect.OutputToString()).To(ContainSubstring(correct))
+ })
})
diff --git a/test/e2e/ps_test.go b/test/e2e/ps_test.go
index a734d399d..a2338c924 100644
--- a/test/e2e/ps_test.go
+++ b/test/e2e/ps_test.go
@@ -104,11 +104,13 @@ var _ = Describe("Podman ps", func() {
SkipIfRemote()
_, ec, _ := podmanTest.RunLsContainer("")
Expect(ec).To(Equal(0))
+ _, ec, _ = podmanTest.RunLsContainer("")
+ Expect(ec).To(Equal(0))
- result := podmanTest.Podman([]string{"ps", "--latest"})
+ result := podmanTest.Podman([]string{"ps", "-q", "--latest"})
result.WaitWithDefaultTimeout()
Expect(result.ExitCode()).To(Equal(0))
- Expect(len(result.OutputToStringArray())).Should(BeNumerically(">", 0))
+ Expect(len(result.OutputToStringArray())).Should(Equal(1))
})
It("podman ps last flag", func() {
diff --git a/test/e2e/pull_test.go b/test/e2e/pull_test.go
index 6d1cb6cbc..98b81876a 100644
--- a/test/e2e/pull_test.go
+++ b/test/e2e/pull_test.go
@@ -251,6 +251,49 @@ var _ = Describe("Podman pull", func() {
session = podmanTest.PodmanNoCache([]string{"rmi", "alpine"})
session.WaitWithDefaultTimeout()
Expect(session.ExitCode()).To(Equal(0))
+
+ // Pulling a multi-image archive without further specifying
+ // which image _must_ error out. Pulling is restricted to one
+ // image.
+ session = podmanTest.PodmanNoCache([]string{"pull", "docker-archive:./testdata/image/docker-two-images.tar.xz"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(125))
+ expectedError := "Unexpected tar manifest.json: expected 1 item, got 2"
+ found, _ := session.ErrorGrepString(expectedError)
+ Expect(found).To(Equal(true))
+
+ // Now pull _one_ image from a multi-image archive via the name
+ // and index syntax.
+ session = podmanTest.PodmanNoCache([]string{"pull", "docker-archive:./testdata/image/docker-two-images.tar.xz:@0"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+
+ session = podmanTest.PodmanNoCache([]string{"pull", "docker-archive:./testdata/image/docker-two-images.tar.xz:example.com/empty:latest"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+
+ session = podmanTest.PodmanNoCache([]string{"pull", "docker-archive:./testdata/image/docker-two-images.tar.xz:@1"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+
+ session = podmanTest.PodmanNoCache([]string{"pull", "docker-archive:./testdata/image/docker-two-images.tar.xz:example.com/empty/but:different"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+
+ // Now check for some errors.
+ session = podmanTest.PodmanNoCache([]string{"pull", "docker-archive:./testdata/image/docker-two-images.tar.xz:foo.com/does/not/exist:latest"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(125))
+ expectedError = "Tag \"foo.com/does/not/exist:latest\" not found"
+ found, _ = session.ErrorGrepString(expectedError)
+ Expect(found).To(Equal(true))
+
+ session = podmanTest.PodmanNoCache([]string{"pull", "docker-archive:./testdata/image/docker-two-images.tar.xz:@2"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(125))
+ expectedError = "Invalid source index @2, only 2 manifest items available"
+ found, _ = session.ErrorGrepString(expectedError)
+ Expect(found).To(Equal(true))
})
It("podman pull from oci-archive", func() {
diff --git a/test/e2e/run_apparmor_test.go b/test/e2e/run_apparmor_test.go
index 53cac9529..7d522a752 100644
--- a/test/e2e/run_apparmor_test.go
+++ b/test/e2e/run_apparmor_test.go
@@ -155,4 +155,17 @@ profile aa-test-profile flags=(attach_disconnected,mediate_deleted) {
inspect := podmanTest.InspectContainer(cid)
Expect(inspect[0].AppArmorProfile).To(Equal(""))
})
+
+ It("podman run apparmor disabled unconfined", func() {
+ skipIfAppArmorEnabled()
+
+ session := podmanTest.Podman([]string{"create", "--security-opt", "apparmor=unconfined", ALPINE, "ls"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+
+ cid := session.OutputToString()
+ // Verify that apparmor.Profile is being set
+ inspect := podmanTest.InspectContainer(cid)
+ Expect(inspect[0].AppArmorProfile).To(Equal(""))
+ })
})
diff --git a/test/e2e/run_networking_test.go b/test/e2e/run_networking_test.go
index a48f7c83e..c20bfe631 100644
--- a/test/e2e/run_networking_test.go
+++ b/test/e2e/run_networking_test.go
@@ -535,15 +535,12 @@ var _ = Describe("Podman run networking", func() {
create := podmanTest.Podman([]string{"network", "create", "--subnet", "10.25.30.0/24", netName})
create.WaitWithDefaultTimeout()
Expect(create.ExitCode()).To(BeZero())
+ defer podmanTest.removeCNINetwork(netName)
run := podmanTest.Podman([]string{"run", "-t", "-i", "--rm", "--net", netName, "--ip", ipAddr, ALPINE, "ip", "addr"})
run.WaitWithDefaultTimeout()
Expect(run.ExitCode()).To(BeZero())
Expect(run.OutputToString()).To(ContainSubstring(ipAddr))
-
- netrm := podmanTest.Podman([]string{"network", "rm", netName})
- netrm.WaitWithDefaultTimeout()
- Expect(netrm.ExitCode()).To(BeZero())
})
It("podman run with new:pod and static-ip", func() {
@@ -555,6 +552,7 @@ var _ = Describe("Podman run networking", func() {
create := podmanTest.Podman([]string{"network", "create", "--subnet", "10.25.40.0/24", netName})
create.WaitWithDefaultTimeout()
Expect(create.ExitCode()).To(BeZero())
+ defer podmanTest.removeCNINetwork(netName)
run := podmanTest.Podman([]string{"run", "-t", "-i", "--rm", "--pod", "new:" + podname, "--net", netName, "--ip", ipAddr, ALPINE, "ip", "addr"})
run.WaitWithDefaultTimeout()
@@ -564,9 +562,5 @@ var _ = Describe("Podman run networking", func() {
podrm := podmanTest.Podman([]string{"pod", "rm", "-f", podname})
podrm.WaitWithDefaultTimeout()
Expect(podrm.ExitCode()).To(BeZero())
-
- netrm := podmanTest.Podman([]string{"network", "rm", netName})
- netrm.WaitWithDefaultTimeout()
- Expect(netrm.ExitCode()).To(BeZero())
})
})
diff --git a/test/e2e/run_passwd_test.go b/test/e2e/run_passwd_test.go
index c48876dee..dfb8c72a1 100644
--- a/test/e2e/run_passwd_test.go
+++ b/test/e2e/run_passwd_test.go
@@ -71,4 +71,58 @@ USER 1000`
Expect(session.ExitCode()).To(Equal(0))
Expect(session.OutputToString()).To(Not(ContainSubstring("passwd")))
})
+
+ It("podman run with no user specified does not change --group specified", func() {
+ session := podmanTest.Podman([]string{"run", "--read-only", BB, "mount"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+ Expect(session.LineInOutputContains("/etc/group")).To(BeFalse())
+ })
+
+ It("podman run group specified in container", func() {
+ session := podmanTest.Podman([]string{"run", "--read-only", "-u", "root:bin", BB, "mount"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+ Expect(session.LineInOutputContains("/etc/group")).To(BeFalse())
+ })
+
+ It("podman run non-numeric group not specified in container", func() {
+ session := podmanTest.Podman([]string{"run", "--read-only", "-u", "root:doesnotexist", BB, "mount"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Not(Equal(0)))
+ })
+
+ It("podman run numeric group specified in container", func() {
+ session := podmanTest.Podman([]string{"run", "--read-only", "-u", "root:11", BB, "mount"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+ Expect(session.LineInOutputContains("/etc/group")).To(BeFalse())
+ })
+
+ It("podman run numeric group not specified in container", func() {
+ session := podmanTest.Podman([]string{"run", "--read-only", "-u", "20001:20001", BB, "mount"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+ Expect(session.LineInOutputContains("/etc/group")).To(BeTrue())
+ })
+
+ It("podman run numeric user not specified in container modifies group", func() {
+ session := podmanTest.Podman([]string{"run", "--read-only", "-u", "20001", BB, "mount"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+ Expect(session.LineInOutputContains("/etc/group")).To(BeTrue())
+ })
+
+ It("podman run numeric group from image and no group file", func() {
+ SkipIfRemote()
+ dockerfile := `FROM alpine
+RUN rm -f /etc/passwd /etc/shadow /etc/group
+USER 1000`
+ imgName := "testimg"
+ podmanTest.BuildImage(dockerfile, imgName, "false")
+ session := podmanTest.Podman([]string{"run", "--rm", imgName, "ls", "/etc/"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+ Expect(session.OutputToString()).To(Not(ContainSubstring("/etc/group")))
+ })
})
diff --git a/test/e2e/run_privileged_test.go b/test/e2e/run_privileged_test.go
index ca8da981f..064ba7d2c 100644
--- a/test/e2e/run_privileged_test.go
+++ b/test/e2e/run_privileged_test.go
@@ -2,13 +2,36 @@ package integration
import (
"os"
+ "strconv"
"strings"
. "github.com/containers/podman/v2/test/utils"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
+ "github.com/syndtr/gocapability/capability"
)
+// helper function for confirming that container capabilities are equal
+// to those of the host, but only to the extent of caps we (podman)
+// know about at compile time. That is: the kernel may have more caps
+// available than we are aware of, leading to host=FFF... and ctr=3FF...
+// because the latter is all we request. Accept that.
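+// (For instance, with 38 compile-time caps the mask is (1<<38)-1 =
+// 0x3fffffffff, matching the ctr=3FF... example above.)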
+func containerCapMatchesHost(ctr_cap string, host_cap string) {
+ ctr_cap_n, err := strconv.ParseUint(ctr_cap, 16, 64)
+ Expect(err).NotTo(HaveOccurred(), "Error parsing %q as hex", ctr_cap)
+
+ host_cap_n, err := strconv.ParseUint(host_cap, 16, 64)
+ Expect(err).NotTo(HaveOccurred(), "Error parsing %q as hex", host_cap)
+
+ // Host caps can never be zero (except rootless, which we don't test),
+ // and host caps must always be a superset (inclusive) of container caps.
+ Expect(host_cap_n).To(BeNumerically(">", 0), "host cap %q should be nonzero", host_cap)
+ Expect(host_cap_n).To(BeNumerically(">=", ctr_cap_n), "host cap %q should never be less than container cap %q", host_cap, ctr_cap)
+
+ host_cap_masked := host_cap_n & (1<<len(capability.List()) - 1)
+ Expect(ctr_cap_n).To(Equal(host_cap_masked), "container cap %q is not a subset of host cap %q", ctr_cap, host_cap)
+}
+
var _ = Describe("Podman privileged container tests", func() {
var (
tempdir string
@@ -44,24 +67,27 @@ var _ = Describe("Podman privileged container tests", func() {
It("podman privileged CapEff", func() {
SkipIfRootless()
- cap := SystemExec("grep", []string{"CapEff", "/proc/self/status"})
- Expect(cap.ExitCode()).To(Equal(0))
+ host_cap := SystemExec("awk", []string{"/^CapEff/ { print $2 }", "/proc/self/status"})
+ Expect(host_cap.ExitCode()).To(Equal(0))
- session := podmanTest.Podman([]string{"run", "--privileged", "busybox", "grep", "CapEff", "/proc/self/status"})
+ session := podmanTest.Podman([]string{"run", "--privileged", "busybox", "awk", "/^CapEff/ { print $2 }", "/proc/self/status"})
session.WaitWithDefaultTimeout()
Expect(session.ExitCode()).To(Equal(0))
- Expect(session.OutputToString()).To(Equal(cap.OutputToString()))
+
+ containerCapMatchesHost(session.OutputToString(), host_cap.OutputToString())
})
It("podman cap-add CapEff", func() {
SkipIfRootless()
- cap := SystemExec("grep", []string{"CapEff", "/proc/self/status"})
- Expect(cap.ExitCode()).To(Equal(0))
+ // Get caps of current process
+ host_cap := SystemExec("awk", []string{"/^CapEff/ { print $2 }", "/proc/self/status"})
+ Expect(host_cap.ExitCode()).To(Equal(0))
- session := podmanTest.Podman([]string{"run", "--cap-add", "all", "busybox", "grep", "CapEff", "/proc/self/status"})
+ session := podmanTest.Podman([]string{"run", "--cap-add", "all", "busybox", "awk", "/^CapEff/ { print $2 }", "/proc/self/status"})
session.WaitWithDefaultTimeout()
Expect(session.ExitCode()).To(Equal(0))
- Expect(session.OutputToString()).To(Equal(cap.OutputToString()))
+
+ containerCapMatchesHost(session.OutputToString(), host_cap.OutputToString())
})
It("podman cap-drop CapEff", func() {
diff --git a/test/e2e/run_test.go b/test/e2e/run_test.go
index 91b0d3e48..a67f7df92 100644
--- a/test/e2e/run_test.go
+++ b/test/e2e/run_test.go
@@ -325,10 +325,10 @@ USER bin`
Expect(session.ExitCode()).To(Equal(0))
}
- session = podmanTest.Podman([]string{"run", "--rm", "--oom-score-adj=100", fedoraMinimal, "cat", "/proc/self/oom_score_adj"})
+ session = podmanTest.Podman([]string{"run", "--rm", "--oom-score-adj=111", fedoraMinimal, "cat", "/proc/self/oom_score_adj"})
session.WaitWithDefaultTimeout()
Expect(session.ExitCode()).To(Equal(0))
- Expect(session.OutputToString()).To(ContainSubstring("100"))
+ Expect(session.OutputToString()).To(Equal("111"))
})
It("podman run limits host test", func() {
@@ -919,6 +919,14 @@ USER mail`
Expect(session.OutputToString()).To(Not(ContainSubstring("/dev/shm type tmpfs (ro,")))
})
+ It("podman run readonly container should NOT mount /run noexec", func() {
+ session := podmanTest.Podman([]string{"run", "--read-only", ALPINE, "sh", "-c", "mount | grep \"/run \""})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+
+ Expect(session.OutputToString()).To(Not(ContainSubstring("noexec")))
+ })
+
It("podman run with bad healthcheck retries", func() {
session := podmanTest.Podman([]string{"run", "-dt", "--health-cmd", "[\"foo\"]", "--health-retries", "0", ALPINE, "top"})
session.Wait()
diff --git a/test/e2e/run_userns_test.go b/test/e2e/run_userns_test.go
index 25f8d0d15..8d860cfc3 100644
--- a/test/e2e/run_userns_test.go
+++ b/test/e2e/run_userns_test.go
@@ -277,6 +277,13 @@ var _ = Describe("Podman UserNS support", func() {
ok, _ := session.GrepString("4998")
Expect(ok).To(BeTrue())
+
+ session = podmanTest.Podman([]string{"run", "--rm", "--userns=container:" + ctrName, "--net=container:" + ctrName, "alpine", "cat", "/proc/self/uid_map"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+
+ ok, _ = session.GrepString("4998")
+ Expect(ok).To(BeTrue())
})
It("podman --user with volume", func() {
diff --git a/test/e2e/runlabel_test.go b/test/e2e/runlabel_test.go
index de79b2b98..0eb679fbf 100644
--- a/test/e2e/runlabel_test.go
+++ b/test/e2e/runlabel_test.go
@@ -29,6 +29,8 @@ var _ = Describe("podman container runlabel", func() {
)
BeforeEach(func() {
+ // runlabel is not supported for remote connections
+ SkipIfRemote()
tempdir, err = CreateTempDirInTempDir()
if err != nil {
os.Exit(1)
@@ -46,7 +48,6 @@ var _ = Describe("podman container runlabel", func() {
})
It("podman container runlabel (podman --version)", func() {
- SkipIfRemote()
image := "podman-runlabel-test:podman"
podmanTest.BuildImage(PodmanDockerfile, image, "false")
@@ -60,7 +61,6 @@ var _ = Describe("podman container runlabel", func() {
})
It("podman container runlabel (ls -la)", func() {
- SkipIfRemote()
image := "podman-runlabel-test:ls"
podmanTest.BuildImage(LsDockerfile, image, "false")
@@ -72,6 +72,19 @@ var _ = Describe("podman container runlabel", func() {
result.WaitWithDefaultTimeout()
Expect(result.ExitCode()).To(Equal(0))
})
+ It("podman container runlabel --display", func() {
+ image := "podman-runlabel-test:ls"
+ podmanTest.BuildImage(LsDockerfile, image, "false")
+
+ result := podmanTest.Podman([]string{"container", "runlabel", "--display", "RUN", image})
+ result.WaitWithDefaultTimeout()
+ Expect(result.ExitCode()).To(Equal(0))
+ Expect(result.OutputToString()).To(ContainSubstring(podmanTest.PodmanBinary + " -la"))
+
+ result = podmanTest.Podman([]string{"rmi", image})
+ result.WaitWithDefaultTimeout()
+ Expect(result.ExitCode()).To(Equal(0))
+ })
It("podman container runlabel bogus label should result in non-zero exit code", func() {
result := podmanTest.Podman([]string{"container", "runlabel", "RUN", ALPINE})
result.WaitWithDefaultTimeout()
@@ -100,7 +113,6 @@ var _ = Describe("podman container runlabel", func() {
})
It("runlabel should fail with nonexist authfile", func() {
- SkipIfRemote()
image := "podman-runlabel-test:podman"
podmanTest.BuildImage(PodmanDockerfile, image, "false")
diff --git a/test/e2e/save_test.go b/test/e2e/save_test.go
index e1396f1b2..1f1258be3 100644
--- a/test/e2e/save_test.go
+++ b/test/e2e/save_test.go
@@ -128,4 +128,51 @@ var _ = Describe("Podman save", func() {
save.WaitWithDefaultTimeout()
Expect(save.ExitCode()).To(Equal(0))
})
+
+ It("podman save --multi-image-archive (tagged images)", func() {
+ multiImageSave(podmanTest, RESTORE_IMAGES)
+ })
+
+ It("podman save --multi-image-archive (untagged images)", func() {
+ // Refer to images via ID instead of tag.
+ session := podmanTest.PodmanNoCache([]string{"images", "--format", "{{.ID}}"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+ ids := session.OutputToStringArray()
+
+ Expect(len(ids)).To(Equal(len(RESTORE_IMAGES)))
+ multiImageSave(podmanTest, ids)
+ })
})
+
+// Create a multi-image archive, remove all images, load it and
+// make sure that all images are (again) present.
+func multiImageSave(podmanTest *PodmanTestIntegration, images []string) {
+ // Create the archive.
+ outfile := filepath.Join(podmanTest.TempDir, "temp.tar")
+ session := podmanTest.PodmanNoCache(append([]string{"save", "-o", outfile, "--multi-image-archive"}, images...))
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+
+ // Remove all images.
+ session = podmanTest.PodmanNoCache([]string{"rmi", "-af"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+
+ // Now load the archive.
+ session = podmanTest.PodmanNoCache([]string{"load", "-i", outfile})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+ // Grep for each image in the `podman load` output.
+ for _, image := range images {
+ found, _ := session.GrepString(image)
+ Expect(found).Should(BeTrue())
+ }
+
+ // Make sure that each image has really been loaded.
+ for _, image := range images {
+ session = podmanTest.PodmanNoCache([]string{"image", "exists", image})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+ }
+}
diff --git a/test/e2e/systemd_test.go b/test/e2e/systemd_test.go
index b5114e429..9a3247b77 100644
--- a/test/e2e/systemd_test.go
+++ b/test/e2e/systemd_test.go
@@ -146,4 +146,12 @@ WantedBy=multi-user.target
Expect(len(conData)).To(Equal(1))
Expect(conData[0].Config.SystemdMode).To(BeTrue())
})
+
+ It("podman run --systemd container should NOT mount /run noexec", func() {
+ session := podmanTest.Podman([]string{"run", "--systemd", "always", ALPINE, "sh", "-c", "mount | grep \"/run \""})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+
+ Expect(session.OutputToString()).To(Not(ContainSubstring("noexec")))
+ })
})
diff --git a/test/e2e/testdata/image b/test/e2e/testdata/image
new file mode 120000
index 000000000..a9e67bf9a
--- /dev/null
+++ b/test/e2e/testdata/image
@@ -0,0 +1 @@
+../../../libpod/image/testdata/ \ No newline at end of file
diff --git a/test/system/001-basic.bats b/test/system/001-basic.bats
index a5a3324fb..1d5eb066b 100644
--- a/test/system/001-basic.bats
+++ b/test/system/001-basic.bats
@@ -69,6 +69,17 @@ function setup() {
is "$output" "Error: unknown flag: --remote" "podman version --remote"
}
+# Check that just calling "podman-remote" prints the usage message even
+# without a running endpoint. We use "podman --remote" since it behaves
+# the same way.
+@test "podman-remote: check for command usage message without a running endpoint" {
+ if is_remote; then
+ skip "only applicable on a local run since this requires no endpoint"
+ fi
+
+ run_podman 125 --remote
+ is "$output" "Error: missing command 'podman COMMAND'" "podman remote show usage message without running endpoint"
+}
+
# This is for development only; it's intended to make sure our timeout
# in run_podman continues to work. This test should never run in production
# because it will, by definition, fail.
diff --git a/test/system/010-images.bats b/test/system/010-images.bats
index 7fd731ca0..c0a8936e3 100644
--- a/test/system/010-images.bats
+++ b/test/system/010-images.bats
@@ -14,6 +14,8 @@ load helpers
--format {{.ID}} | [0-9a-f]\\\{12\\\}
--format {{.ID}} --no-trunc | sha256:[0-9a-f]\\\{64\\\}
--format {{.Repository}}:{{.Tag}} | $PODMAN_TEST_IMAGE_FQN
+--format {{.Labels.created_by}} | test/system/build-testimage
+--format {{.Labels.created_at}} | 20[0-9-]\\\+T[0-9:]\\\+Z
"
parse_table "$tests" | while read fmt expect; do
@@ -27,11 +29,13 @@ load helpers
@test "podman images - json" {
# 'created': podman includes fractional seconds, podman-remote does not
tests="
-Names[0] | $PODMAN_TEST_IMAGE_FQN
-Id | [0-9a-f]\\\{64\\\}
-Digest | sha256:[0-9a-f]\\\{64\\\}
-CreatedAt | [0-9-]\\\+T[0-9:.]\\\+Z
-Size | [0-9]\\\+
+Names[0] | $PODMAN_TEST_IMAGE_FQN
+Id | [0-9a-f]\\\{64\\\}
+Digest | sha256:[0-9a-f]\\\{64\\\}
+CreatedAt | [0-9-]\\\+T[0-9:.]\\\+Z
+Size | [0-9]\\\+
+Labels.created_by | test/system/build-testimage
+Labels.created_at | 20[0-9-]\\\+T[0-9:]\\\+Z
"
run_podman images -a --format json
diff --git a/test/system/030-run.bats b/test/system/030-run.bats
index 198c8881d..4e518c571 100644
--- a/test/system/030-run.bats
+++ b/test/system/030-run.bats
@@ -134,24 +134,29 @@ echo $rand | 0 | $rand
run_podman run --pull=never $IMAGE true
is "$output" "" "--pull=never [present]: no output"
- # Now test with busybox, which we don't have present
- run_podman 125 run --pull=never busybox true
- is "$output" "Error: unable to find a name and tag match for busybox in repotags: no such image" "--pull=never [busybox/missing]: error"
+ # Now test with a remote image which we don't have present (the 00 tag)
+ NONLOCAL_IMAGE="$PODMAN_TEST_IMAGE_REGISTRY/$PODMAN_TEST_IMAGE_USER/$PODMAN_TEST_IMAGE_NAME:00000000"
- run_podman run --pull=missing busybox true
- is "$output" "Trying to pull .*" "--pull=missing [busybox/missing]: fetches"
+ run_podman 125 run --pull=never $NONLOCAL_IMAGE true
+ is "$output" "Error: unable to find a name and tag match for $NONLOCAL_IMAGE in repotags: no such image" "--pull=never [with image not present]: error"
- run_podman run --pull=always busybox true
- is "$output" "Trying to pull .*" "--pull=always [busybox/present]: fetches"
+ run_podman run --pull=missing $NONLOCAL_IMAGE true
+ is "$output" "Trying to pull .*" "--pull=missing [with image NOT PRESENT]: fetches"
+
+ run_podman run --pull=missing $NONLOCAL_IMAGE true
+ is "$output" "" "--pull=missing [with image PRESENT]: does not re-fetch"
+
+ run_podman run --pull=always $NONLOCAL_IMAGE true
+ is "$output" "Trying to pull .*" "--pull=always [with image PRESENT]: re-fetches"
run_podman rm -a
- run_podman rmi busybox
+ run_podman rmi $NONLOCAL_IMAGE
}
# 'run --rmi' deletes the image in the end unless it's used by another container
@test "podman run --rmi" {
# Name of a nonlocal image. It should be pulled in by the first 'run'
- NONLOCAL_IMAGE=busybox
+ NONLOCAL_IMAGE="$PODMAN_TEST_IMAGE_REGISTRY/$PODMAN_TEST_IMAGE_USER/$PODMAN_TEST_IMAGE_NAME:00000000"
run_podman 1 image exists $NONLOCAL_IMAGE
# Run a container, without --rm; this should block subsequent --rmi
@@ -184,9 +189,19 @@ echo $rand | 0 | $rand
is "$(< $cidfile)" "$cid" "contents of cidfile == container ID"
- conmon_pid=$(< $pidfile)
- is "$(readlink /proc/$conmon_pid/exe)" ".*/conmon" \
- "conmon pidfile (= PID $conmon_pid) points to conmon process"
+ # Cross-check --conmon-pidfile against 'podman inspect'
+ local conmon_pid_from_file=$(< $pidfile)
+ run_podman inspect --format '{{.State.ConmonPid}}' $cid
+ local conmon_pid_from_inspect="$output"
+ is "$conmon_pid_from_file" "$conmon_pid_from_inspect" \
+ "Conmon pid in pidfile matches what 'podman inspect' claims"
+
+ # /proc/PID/exe should be a symlink to a conmon executable
+ # FIXME: 'echo' and 'ls' are to help debug #7580, a CI flake
+ echo "conmon pid = $conmon_pid_from_file"
+ ls -l /proc/$conmon_pid_from_file
+ is "$(readlink /proc/$conmon_pid_from_file/exe)" ".*/conmon" \
+ "conmon pidfile (= PID $conmon_pid_from_file) points to conmon process"
# All OK. Kill container.
run_podman rm -f $cid
@@ -199,7 +214,7 @@ echo $rand | 0 | $rand
}
@test "podman run docker-archive" {
- skip_if_remote "FIXME: pending #7116"
+ skip_if_remote "podman-remote does not support docker-archive (#7116)"
# Create an image that, when run, outputs a random magic string
expect=$(random_string 20)
diff --git a/test/system/055-rm.bats b/test/system/055-rm.bats
index 478ba0f20..c8475c3e9 100644
--- a/test/system/055-rm.bats
+++ b/test/system/055-rm.bats
@@ -44,8 +44,6 @@ load helpers
#
# See https://github.com/containers/podman/issues/3795
@test "podman rm -f" {
- skip_if_remote "FIXME: pending #7117"
-
rand=$(random_string 30)
( sleep 3; run_podman rm -f $rand ) &
run_podman 137 run --name $rand $IMAGE sleep 30
diff --git a/test/system/110-history.bats b/test/system/110-history.bats
index 5dc221d61..75c15b088 100644
--- a/test/system/110-history.bats
+++ b/test/system/110-history.bats
@@ -22,9 +22,10 @@ load helpers
}
@test "podman history - json" {
+ # Sigh. Timestamp in .created can be '...Z' or '...-06:00'
tests="
id | [0-9a-f]\\\{64\\\}
-created | [0-9-]\\\+T[0-9:.]\\\+Z
+created | [0-9-]\\\+T[0-9:.]\\\+[Z0-9:+-]\\\+
size | -\\\?[0-9]\\\+
"
diff --git a/test/system/120-load.bats b/test/system/120-load.bats
index 86b396c4a..d7aa16d95 100644
--- a/test/system/120-load.bats
+++ b/test/system/120-load.bats
@@ -27,25 +27,43 @@ verify_iid_and_name() {
}
@test "podman save to pipe and load" {
- get_iid_and_name
+ # Generate a random name and tag (must be lower-case)
+ local random_name=x$(random_string 12 | tr A-Z a-z)
+ local random_tag=t$(random_string 7 | tr A-Z a-z)
+ local fqin=localhost/$random_name:$random_tag
+ run_podman tag $IMAGE $fqin
+
+ archive=$PODMAN_TMPDIR/myimage-$(random_string 8).tar
# We can't use run_podman because that uses the BATS 'run' function
# which redirects stdout and stderr. Here we need to guarantee
# that podman's stdout is a pipe, not any other form of redirection
- $PODMAN save --format oci-archive $IMAGE | cat >$archive
+ $PODMAN save --format oci-archive $fqin | cat >$archive
if [ "$status" -ne 0 ]; then
die "Command failed: podman save ... | cat"
fi
# Make sure we can reload it
- # FIXME: when/if 7337 gets fixed, add a random tag instead of rmi'ing
- # FIXME: when/if 7371 gets fixed, use verify_iid_and_name()
- run_podman rmi $iid
+ run_podman rmi $fqin
run_podman load -i $archive
- # FIXME: cannot compare IID, see #7371
- run_podman images -a --format '{{.Repository}}:{{.Tag}}'
- is "$output" "$IMAGE" "image preserves name across save/load"
+ # FIXME: cannot compare IID, see #7371, so we check only the tag
+ run_podman images $fqin --format '{{.Repository}}:{{.Tag}}'
+ is "$output" "$fqin" "image preserves name across save/load"
+
+ # FIXME: when/if 7337 gets fixed, load with a new tag
+ if false; then
+ local new_name=x$(random_string 14 | tr A-Z a-z)
+ local new_tag=t$(random_string 6 | tr A-Z a-z)
+ run_podman rmi $fqin
+ fqin=localhost/$new_name:$new_tag
+ run_podman load -i $archive $fqin
+ run_podman images $fqin --format '{{.Repository}}:{{.Tag}}'
+ is "$output" "$fqin" "image can be loaded with new name:tag"
+ fi
+
+ # Clean up
+ run_podman rmi $fqin
}
diff --git a/test/system/130-kill.bats b/test/system/130-kill.bats
index 05090f852..c16e64c58 100644
--- a/test/system/130-kill.bats
+++ b/test/system/130-kill.bats
@@ -6,8 +6,6 @@
load helpers
@test "podman kill - test signal handling in containers" {
- skip_if_remote "FIXME: pending #7135"
-
# podman-remote and crun interact poorly in f31: crun seems to gobble up
# some signals.
# Workaround: run 'env --default-signal sh' instead of just 'sh' in
diff --git a/test/system/150-login.bats b/test/system/150-login.bats
index 00c60ca95..5151ab0e1 100644
--- a/test/system/150-login.bats
+++ b/test/system/150-login.bats
@@ -56,14 +56,17 @@ function setup() {
AUTHDIR=${PODMAN_LOGIN_WORKDIR}/auth
mkdir -p $AUTHDIR
+ # Registry image: a copy of the docker.io registry image, hosted on our own registry
+ local REGISTRY_IMAGE="$PODMAN_TEST_IMAGE_REGISTRY/$PODMAN_TEST_IMAGE_USER/registry:2.7"
+
# Pull registry image, but into a separate container storage
mkdir -p ${PODMAN_LOGIN_WORKDIR}/root
mkdir -p ${PODMAN_LOGIN_WORKDIR}/runroot
PODMAN_LOGIN_ARGS="--root ${PODMAN_LOGIN_WORKDIR}/root --runroot ${PODMAN_LOGIN_WORKDIR}/runroot"
# Give it three tries, to compensate for flakes
- run_podman ${PODMAN_LOGIN_ARGS} pull registry:2.6 ||
- run_podman ${PODMAN_LOGIN_ARGS} pull registry:2.6 ||
- run_podman ${PODMAN_LOGIN_ARGS} pull registry:2.6
+ run_podman ${PODMAN_LOGIN_ARGS} pull $REGISTRY_IMAGE ||
+ run_podman ${PODMAN_LOGIN_ARGS} pull $REGISTRY_IMAGE ||
+ run_podman ${PODMAN_LOGIN_ARGS} pull $REGISTRY_IMAGE
# Registry image needs a cert. Self-signed is good enough.
CERT=$AUTHDIR/domain.crt
@@ -76,10 +79,8 @@ function setup() {
# Store credentials where container will see them
if [ ! -e $AUTHDIR/htpasswd ]; then
- run_podman ${PODMAN_LOGIN_ARGS} run --rm \
- --entrypoint htpasswd registry:2.6 \
- -Bbn ${PODMAN_LOGIN_USER} ${PODMAN_LOGIN_PASS} \
- > $AUTHDIR/htpasswd
+ htpasswd -Bbn ${PODMAN_LOGIN_USER} ${PODMAN_LOGIN_PASS} \
+ > $AUTHDIR/htpasswd
# In case $PODMAN_TEST_KEEP_LOGIN_REGISTRY is set, for testing later
echo "${PODMAN_LOGIN_USER}:${PODMAN_LOGIN_PASS}" \
@@ -97,7 +98,7 @@ function setup() {
-e REGISTRY_AUTH_HTPASSWD_PATH=/auth/htpasswd \
-e REGISTRY_HTTP_TLS_CERTIFICATE=/auth/domain.crt \
-e REGISTRY_HTTP_TLS_KEY=/auth/domain.key \
- registry:2.6
+ $REGISTRY_IMAGE
}
# END first "test" - start a registry for use by other tests
@@ -189,38 +190,26 @@ EOF
}
@test "podman push ok" {
- # ARGH! We can't push $IMAGE (alpine_labels) to this registry; error is:
- #
- # Writing manifest to image destination
- # Error: Error copying image to the remote destination: Error writing manifest: Error uploading manifest latest to localhost:${PODMAN_LOGIN_REGISTRY_PORT}/okpush: received unexpected HTTP status: 500 Internal Server Error
- #
- # Root cause: something to do with v1/v2 s1/s2:
- #
- # https://github.com/containers/skopeo/issues/651
- #
-
- run_podman pull busybox
-
- # Preserve its ID for later comparison against push/pulled image
- run_podman inspect --format '{{.Id}}' busybox
- id_busybox=$output
+ # Preserve image ID for later comparison against push/pulled image
+ run_podman inspect --format '{{.Id}}' $IMAGE
+ iid=$output
destname=ok-$(random_string 10 | tr A-Z a-z)-ok
# Use command-line credentials
run_podman push --tls-verify=false \
--creds ${PODMAN_LOGIN_USER}:${PODMAN_LOGIN_PASS} \
- busybox localhost:${PODMAN_LOGIN_REGISTRY_PORT}/$destname
+ $IMAGE localhost:${PODMAN_LOGIN_REGISTRY_PORT}/$destname
# Yay! Pull it back
run_podman pull --tls-verify=false \
--creds ${PODMAN_LOGIN_USER}:${PODMAN_LOGIN_PASS} \
localhost:${PODMAN_LOGIN_REGISTRY_PORT}/$destname
- # Compare to original busybox
+ # Compare to original image
run_podman inspect --format '{{.Id}}' $destname
- is "$output" "$id_busybox" "Image ID of pulled image == busybox"
+ is "$output" "$iid" "Image ID of pulled image == original IID"
- run_podman rmi busybox $destname
+ run_podman rmi $destname
}
# END primary podman login/push/pull tests
diff --git a/test/system/260-sdnotify.bats b/test/system/260-sdnotify.bats
index 62d3c1497..06aa3bba7 100644
--- a/test/system/260-sdnotify.bats
+++ b/test/system/260-sdnotify.bats
@@ -115,9 +115,10 @@ function _assert_mainpid_is_conmon() {
@test "sdnotify : container" {
# Sigh... we need to pull a humongous image because it has systemd-notify.
+ # (IMPORTANT: fedora:32 and above silently removed systemd-notify; this
+ # caused CI to hang. That's why we explicitly require fedora:31)
# FIXME: is there a smaller image we could use?
- _FEDORA=registry.fedoraproject.org/fedora:31
-
+ local _FEDORA="$PODMAN_TEST_IMAGE_REGISTRY/$PODMAN_TEST_IMAGE_USER/fedora:31"
# Pull that image. Retry in case of flakes.
run_podman pull $_FEDORA || \
run_podman pull $_FEDORA || \
diff --git a/test/system/500-networking.bats b/test/system/500-networking.bats
index 0fae3dcd3..39de8ad54 100644
--- a/test/system/500-networking.bats
+++ b/test/system/500-networking.bats
@@ -12,7 +12,7 @@ load helpers
random_2=$(random_string 30)
HOST_PORT=8080
- SERVER=http://localhost:$HOST_PORT
+ SERVER=http://127.0.0.1:$HOST_PORT
# Create a test file with random content
INDEX1=$PODMAN_TMPDIR/hello.txt
@@ -22,7 +22,7 @@ load helpers
run_podman run -d --name myweb -p "$HOST_PORT:80" \
-v $INDEX1:/var/www/index.txt \
-w /var/www \
- busybox httpd -f -p 80
+ $IMAGE /bin/busybox-extras httpd -f -p 80
cid=$output
# In that container, create a second file, using exec and redirection
@@ -33,14 +33,14 @@ load helpers
# Verify http contents: curl from localhost
run curl -s $SERVER/index.txt
- is "$output" "$random_1" "curl localhost:/index.txt"
+ is "$output" "$random_1" "curl 127.0.0.1:/index.txt"
run curl -s $SERVER/index2.txt
- is "$output" "$random_2" "curl localhost:/index2.txt"
+ is "$output" "$random_2" "curl 127.0.0.1:/index2.txt"
# Verify http contents: wget from a second container
- run_podman run --rm --net=host busybox wget -qO - $SERVER/index.txt
+ run_podman run --rm --net=host $IMAGE wget -qO - $SERVER/index.txt
is "$output" "$random_1" "podman wget /index.txt"
- run_podman run --rm --net=host busybox wget -qO - $SERVER/index2.txt
+ run_podman run --rm --net=host $IMAGE wget -qO - $SERVER/index2.txt
is "$output" "$random_2" "podman wget /index2.txt"
# Tests #4889 - two-argument form of "podman ports" was broken
@@ -57,7 +57,6 @@ load helpers
# Clean up
run_podman stop -t 1 myweb
run_podman rm myweb
- run_podman rmi busybox
}
# Issue #5466 - port-forwarding doesn't work with this option and -d
diff --git a/test/system/build-testimage b/test/system/build-testimage
new file mode 100755
index 000000000..64aa46337
--- /dev/null
+++ b/test/system/build-testimage
@@ -0,0 +1,59 @@
+#!/bin/bash
+#
+# build-testimage - script for producing a test image for podman CI
+#
+# The idea is to have a small multi-purpose image that can be pulled once
+# by system tests and used for as many tests as possible. This image
+# should live on quay.io, should be small in size, and should include
+# as many components as needed by system tests so they don't have to
+# pull other images.
+#
+# Unfortunately, "small" is incompatible with "systemd" so tests
+# still need a fedora image for that.
+#
+
+# Tag for this new image
+YMD=$(date +%Y%m%d)
+
+# git-relative path to this script
+create_script=$(cd $(dirname $0) && git ls-files --full-name $(basename $0))
+if [ -z "$create_script" ]; then
+ create_script=$0
+fi
+
+# Creation timestamp, Zulu time
+create_time_z=$(env TZ=UTC date +'%Y-%m-%dT%H:%M:%SZ')
+
+set -ex
+
+# Please document the reason for all flags, apk's, and anything non-obvious
+#
+# --squash-all : needed by 'tree' test in 070-build.bats
+# busybox-extras : provides httpd needed in 500-networking.bats
+#
+podman rmi -f testimage &> /dev/null || true
+podman build --squash-all -t testimage - <<EOF
+FROM docker.io/library/alpine:3.12.0
+RUN apk add busybox-extras
+LABEL created_by=$create_script
+LABEL created_at=$create_time_z
+CMD ["/bin/echo", "This container is intended for podman CI testing"]
+EOF
+
+# Tag and push to quay.
+podman tag testimage quay.io/edsantiago/testimage:$YMD
+podman push quay.io/edsantiago/testimage:$YMD
+
+# Side note: there should always be a testimage tagged ':00000000'
+# (eight zeroes) in the same location; this is used by tests which
+# need to pull a non-locally-cached image. This image will rarely
+# if ever need to change, nor in fact does it even have to be a
+# copy of this testimage since all we use it for is 'true'.
+#
+# As of 2020-09-02 it is simply busybox, because it is super small:
+#
+# podman pull docker.io/library/busybox:1.32.0
+# podman tag docker.io/library/busybox:1.32.0 \
+# quay.io/edsantiago/testimage:00000000
+# podman push quay.io/edsantiago/testimage:00000000
+#
diff --git a/test/system/helpers.bash b/test/system/helpers.bash
index a6414344e..514ba249e 100644
--- a/test/system/helpers.bash
+++ b/test/system/helpers.bash
@@ -6,8 +6,8 @@ PODMAN=${PODMAN:-podman}
# Standard image to use for most tests
PODMAN_TEST_IMAGE_REGISTRY=${PODMAN_TEST_IMAGE_REGISTRY:-"quay.io"}
PODMAN_TEST_IMAGE_USER=${PODMAN_TEST_IMAGE_USER:-"libpod"}
-PODMAN_TEST_IMAGE_NAME=${PODMAN_TEST_IMAGE_NAME:-"alpine_labels"}
-PODMAN_TEST_IMAGE_TAG=${PODMAN_TEST_IMAGE_TAG:-"latest"}
+PODMAN_TEST_IMAGE_NAME=${PODMAN_TEST_IMAGE_NAME:-"testimage"}
+PODMAN_TEST_IMAGE_TAG=${PODMAN_TEST_IMAGE_TAG:-"20200902"}
PODMAN_TEST_IMAGE_FQN="$PODMAN_TEST_IMAGE_REGISTRY/$PODMAN_TEST_IMAGE_USER/$PODMAN_TEST_IMAGE_NAME:$PODMAN_TEST_IMAGE_TAG"
# Because who wants to spell that out each time?
@@ -67,7 +67,7 @@ function basic_teardown() {
run_podman '?' pod rm --all --force
run_podman '?' rm --all --force
- /bin/rm -rf $PODMAN_TMPDIR
+ command rm -rf $PODMAN_TMPDIR
}
diff --git a/troubleshooting.md b/troubleshooting.md
index 7e8f9bcb0..9677b1821 100644
--- a/troubleshooting.md
+++ b/troubleshooting.md
@@ -592,3 +592,28 @@ access to that port. For example:
```
$ podman run --pod srcview --name src-expose -v "${PWD}:/var/opt/localrepo":Z,ro sourcegraph/src-expose:latest serve /var/opt/localrepo
```
+
+### 24) Podman container images fail with `fuse: device not found` when run
+
+Some container images require the fuse kernel module to be loaded
+before they can use the fuse filesystem.
+
+#### Symptom
+
+When trying to run container images from quay.io/podman, quay.io/containers,
+registry.access.redhat.com/ubi8, or other locations, an error like the following is sometimes returned:
+
+```
+ERRO error unmounting /var/lib/containers/storage/overlay/30c058cdadc888177361dd14a7ed7edab441c58525b341df321f07bc11440e68/merged: invalid argument
+error mounting container "1ae176ca72b3da7c70af31db7434bcf6f94b07dbc0328bc7e4e8fc9579d0dc2e": error mounting build container "1ae176ca72b3da7c70af31db7434bcf6f94b07dbc0328bc7e4e8fc9579d0dc2e": error creating overlay mount to /var/lib/containers/storage/overlay/30c058cdadc888177361dd14a7ed7edab441c58525b341df321f07bc11440e68/merged: using mount program /usr/bin/fuse-overlayfs: fuse: device not found, try 'modprobe fuse' first
+fuse-overlayfs: cannot mount: No such device
+: exit status 1
+ERRO exit status 1
+```
+
+#### Solution
+
+If you encounter a `fuse: device not found` error when running a container image, it is likely that
+the fuse kernel module has not been loaded on your host system. Load the module with `modprobe fuse`,
+then run the container image again. To load the module automatically at boot time, add a configuration
+file to `/etc/modules-load.d`. See `man modules-load.d` for more details.
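+
+For example, to load the module immediately and on every boot (the file
+name `fuse.conf` is arbitrary; any `*.conf` file in that directory is read):
+
+```
+$ sudo modprobe fuse
+$ echo fuse | sudo tee /etc/modules-load.d/fuse.conf
+```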
diff --git a/vendor/github.com/containers/common/pkg/seccomp/conversion.go b/vendor/github.com/containers/common/pkg/seccomp/conversion.go
index 79a893ba3..dfab381a5 100644
--- a/vendor/github.com/containers/common/pkg/seccomp/conversion.go
+++ b/vendor/github.com/containers/common/pkg/seccomp/conversion.go
@@ -1,25 +1,92 @@
+// NOTE: this package was originally copied from
+// github.com/opencontainers/runc and modified to work for other use cases
+
package seccomp
-import "fmt"
-
-var goArchToSeccompArchMap = map[string]Arch{
- "386": ArchX86,
- "amd64": ArchX86_64,
- "amd64p32": ArchX32,
- "arm": ArchARM,
- "arm64": ArchAARCH64,
- "mips": ArchMIPS,
- "mips64": ArchMIPS64,
- "mips64le": ArchMIPSEL64,
- "mips64p32": ArchMIPS64N32,
- "mips64p32le": ArchMIPSEL64N32,
- "mipsle": ArchMIPSEL,
- "ppc": ArchPPC,
- "ppc64": ArchPPC64,
- "ppc64le": ArchPPC64LE,
- "s390": ArchS390,
- "s390x": ArchS390X,
-}
+import (
+ "fmt"
+
+ "github.com/opencontainers/runtime-spec/specs-go"
+ "github.com/pkg/errors"
+)
+
+var (
+ goArchToSeccompArchMap = map[string]Arch{
+ "386": ArchX86,
+ "amd64": ArchX86_64,
+ "amd64p32": ArchX32,
+ "arm": ArchARM,
+ "arm64": ArchAARCH64,
+ "mips": ArchMIPS,
+ "mips64": ArchMIPS64,
+ "mips64le": ArchMIPSEL64,
+ "mips64p32": ArchMIPS64N32,
+ "mips64p32le": ArchMIPSEL64N32,
+ "mipsle": ArchMIPSEL,
+ "ppc": ArchPPC,
+ "ppc64": ArchPPC64,
+ "ppc64le": ArchPPC64LE,
+ "s390": ArchS390,
+ "s390x": ArchS390X,
+ }
+ specArchToLibseccompArchMap = map[specs.Arch]string{
+ specs.ArchX86: "x86",
+ specs.ArchX86_64: "amd64",
+ specs.ArchX32: "x32",
+ specs.ArchARM: "arm",
+ specs.ArchAARCH64: "arm64",
+ specs.ArchMIPS: "mips",
+ specs.ArchMIPS64: "mips64",
+ specs.ArchMIPS64N32: "mips64n32",
+ specs.ArchMIPSEL: "mipsel",
+ specs.ArchMIPSEL64: "mipsel64",
+ specs.ArchMIPSEL64N32: "mipsel64n32",
+ specs.ArchPPC: "ppc",
+ specs.ArchPPC64: "ppc64",
+ specs.ArchPPC64LE: "ppc64le",
+ specs.ArchS390: "s390",
+ specs.ArchS390X: "s390x",
+ }
+ specArchToSeccompArchMap = map[specs.Arch]Arch{
+ specs.ArchX86: ArchX86,
+ specs.ArchX86_64: ArchX86_64,
+ specs.ArchX32: ArchX32,
+ specs.ArchARM: ArchARM,
+ specs.ArchAARCH64: ArchAARCH64,
+ specs.ArchMIPS: ArchMIPS,
+ specs.ArchMIPS64: ArchMIPS64,
+ specs.ArchMIPS64N32: ArchMIPS64N32,
+ specs.ArchMIPSEL: ArchMIPSEL,
+ specs.ArchMIPSEL64: ArchMIPSEL64,
+ specs.ArchMIPSEL64N32: ArchMIPSEL64N32,
+ specs.ArchPPC: ArchPPC,
+ specs.ArchPPC64: ArchPPC64,
+ specs.ArchPPC64LE: ArchPPC64LE,
+ specs.ArchS390: ArchS390,
+ specs.ArchS390X: ArchS390X,
+ }
+ specActionToSeccompActionMap = map[specs.LinuxSeccompAction]Action{
+ specs.ActKill: ActKill,
+ // TODO: wait for this PR to get merged:
+ // https://github.com/opencontainers/runtime-spec/pull/1064
+ // specs.ActKillProcess ActKillProcess,
+ // specs.ActKillThread ActKillThread,
+ specs.ActErrno: ActErrno,
+ specs.ActTrap: ActTrap,
+ specs.ActAllow: ActAllow,
+ specs.ActTrace: ActTrace,
+ specs.ActLog: ActLog,
+ }
+ specOperatorToSeccompOperatorMap = map[specs.LinuxSeccompOperator]Operator{
+ specs.OpNotEqual: OpNotEqual,
+ specs.OpLessThan: OpLessThan,
+ specs.OpLessEqual: OpLessEqual,
+ specs.OpEqualTo: OpEqualTo,
+ specs.OpGreaterEqual: OpGreaterEqual,
+ specs.OpGreaterThan: OpGreaterThan,
+ specs.OpMaskedEqual: OpMaskedEqual,
+ }
+)
// GoArchToSeccompArch converts a runtime.GOARCH to a seccomp `Arch`. The
// function returns an error if the architecture conversion is not supported.
@@ -30,3 +97,100 @@ func GoArchToSeccompArch(goArch string) (Arch, error) {
}
return arch, nil
}
+
+// specToSeccomp converts a `LinuxSeccomp` spec into a `Seccomp` struct.
+func specToSeccomp(spec *specs.LinuxSeccomp) (*Seccomp, error) {
+ res := &Seccomp{
+ Syscalls: []*Syscall{},
+ }
+
+ for _, arch := range spec.Architectures {
+ newArch, err := specArchToSeccompArch(arch)
+ if err != nil {
+ return nil, errors.Wrap(err, "convert spec arch")
+ }
+ res.Architectures = append(res.Architectures, newArch)
+ }
+
+ // Convert default action
+ newDefaultAction, err := specActionToSeccompAction(spec.DefaultAction)
+ if err != nil {
+ return nil, errors.Wrap(err, "convert default action")
+ }
+ res.DefaultAction = newDefaultAction
+
+ // Loop through all syscall blocks and convert them to the internal format
+ for _, call := range spec.Syscalls {
+ newAction, err := specActionToSeccompAction(call.Action)
+ if err != nil {
+ return nil, errors.Wrap(err, "convert action")
+ }
+
+ for _, name := range call.Names {
+ newCall := Syscall{
+ Name: name,
+ Action: newAction,
+ ErrnoRet: call.ErrnoRet,
+ Args: []*Arg{},
+ }
+
+ // Loop through all the arguments of the syscall and convert them
+ for _, arg := range call.Args {
+ newOp, err := specOperatorToSeccompOperator(arg.Op)
+ if err != nil {
+ return nil, errors.Wrap(err, "convert operator")
+ }
+
+ newArg := Arg{
+ Index: arg.Index,
+ Value: arg.Value,
+ ValueTwo: arg.ValueTwo,
+ Op: newOp,
+ }
+
+ newCall.Args = append(newCall.Args, &newArg)
+ }
+ res.Syscalls = append(res.Syscalls, &newCall)
+ }
+ }
+
+ return res, nil
+}
+
+// specArchToLibseccompArch converts a spec arch into a libseccomp one.
+func specArchToLibseccompArch(arch specs.Arch) (string, error) {
+ if res, ok := specArchToLibseccompArchMap[arch]; ok {
+ return res, nil
+ }
+ return "", errors.Errorf(
+ "architecture %q is not valid for libseccomp", arch,
+ )
+}
+
+// specArchToSeccompArch converts a spec arch into an internal one.
+func specArchToSeccompArch(arch specs.Arch) (Arch, error) {
+ if res, ok := specArchToSeccompArchMap[arch]; ok {
+ return res, nil
+ }
+ return "", errors.Errorf("architecture %q is not valid", arch)
+}
+
+// specActionToSeccompAction converts a spec action into a seccomp one.
+func specActionToSeccompAction(action specs.LinuxSeccompAction) (Action, error) {
+ if res, ok := specActionToSeccompActionMap[action]; ok {
+ return res, nil
+ }
+ return "", errors.Errorf(
+ "spec action %q is not valid internal action", action,
+ )
+}
+
+// specOperatorToSeccompOperator converts a spec operator into a seccomp one.
+func specOperatorToSeccompOperator(operator specs.LinuxSeccompOperator) (Operator, error) {
+ if op, ok := specOperatorToSeccompOperatorMap[operator]; ok {
+ return op, nil
+ }
+ return "", errors.Errorf(
+ "spec operator %q is not a valid internal operator", operator,
+ )
+}
diff --git a/vendor/github.com/containers/common/pkg/seccomp/filter.go b/vendor/github.com/containers/common/pkg/seccomp/filter.go
new file mode 100644
index 000000000..ac9b2698f
--- /dev/null
+++ b/vendor/github.com/containers/common/pkg/seccomp/filter.go
@@ -0,0 +1,237 @@
+// +build seccomp
+
+// NOTE: this package was originally copied from
+// github.com/opencontainers/runc and modified to work for other use cases
+
+package seccomp
+
+import (
+ specs "github.com/opencontainers/runtime-spec/specs-go"
+ "github.com/pkg/errors"
+ libseccomp "github.com/seccomp/libseccomp-golang"
+ "golang.org/x/sys/unix"
+)
+
+var (
+ // ErrSpecNil is a possible return error from BuildFilter() and occurs if
+ // the provided spec is nil.
+ ErrSpecNil = errors.New("spec is nil")
+
+ // ErrSpecEmpty is a possible return error from BuildFilter() and occurs if
+ // the provided spec has neither a DefaultAction nor any syscalls.
+ ErrSpecEmpty = errors.New("spec contains neither a default action nor any syscalls")
+)
+
+// BuildFilter does a basic validation for the provided seccomp profile
+// string and returns a filter for it.
+func BuildFilter(spec *specs.LinuxSeccomp) (*libseccomp.ScmpFilter, error) {
+ // Sanity checking to allow consumers to act accordingly
+ if spec == nil {
+ return nil, ErrSpecNil
+ }
+ if spec.DefaultAction == "" && len(spec.Syscalls) == 0 {
+ return nil, ErrSpecEmpty
+ }
+
+ profile, err := specToSeccomp(spec)
+ if err != nil {
+ return nil, errors.Wrap(err, "convert spec to seccomp profile")
+ }
+
+ defaultAction, err := toAction(profile.DefaultAction, nil)
+ if err != nil {
+ return nil, errors.Wrapf(err, "convert default action %s", profile.DefaultAction)
+ }
+
+ filter, err := libseccomp.NewFilter(defaultAction)
+ if err != nil {
+ return nil, errors.Wrapf(err, "create filter for default action %s", defaultAction)
+ }
+
+ // Add extra architectures
+ for _, arch := range spec.Architectures {
+ libseccompArch, err := specArchToLibseccompArch(arch)
+ if err != nil {
+ return nil, errors.Wrap(err, "convert spec arch")
+ }
+
+ scmpArch, err := libseccomp.GetArchFromString(libseccompArch)
+ if err != nil {
+ return nil, errors.Wrapf(err, "validate Seccomp architecture %s", arch)
+ }
+
+ if err := filter.AddArch(scmpArch); err != nil {
+ return nil, errors.Wrap(err, "add architecture to seccomp filter")
+ }
+ }
+
+ // Unset no new privs bit
+ if err := filter.SetNoNewPrivsBit(false); err != nil {
+ return nil, errors.Wrap(err, "set no new privileges flag")
+ }
+
+ // Add a rule for each syscall
+ for _, call := range profile.Syscalls {
+ if call == nil {
+ return nil, errors.New("encountered nil syscall while initializing seccomp")
+ }
+
+ if err = matchSyscall(filter, call); err != nil {
+ return nil, errors.Wrap(err, "filter matches syscall")
+ }
+ }
+
+ return filter, nil
+}
+
+func matchSyscall(filter *libseccomp.ScmpFilter, call *Syscall) error {
+ if call == nil || filter == nil {
+ return errors.New("cannot use nil as syscall to block")
+ }
+
+ if call.Name == "" {
+ return errors.New("empty string is not a valid syscall")
+ }
+
+ // If we can't resolve the syscall, assume it's not supported on this kernel
+ // Ignore it, don't error out
+ callNum, err := libseccomp.GetSyscallFromName(call.Name)
+ if err != nil {
+ return nil
+ }
+
+ // Convert the call's action to the libseccomp equivalent
+ callAct, err := toAction(call.Action, call.ErrnoRet)
+ if err != nil {
+ return errors.Wrapf(err, "convert action %s", call.Action)
+ }
+
+ // Unconditional match - just add the rule
+ if len(call.Args) == 0 {
+ if err = filter.AddRule(callNum, callAct); err != nil {
+ return errors.Wrapf(err, "add seccomp filter rule for syscall %s", call.Name)
+ }
+ } else {
+ // Linux system calls can have at most 6 arguments
+ const syscallMaxArguments int = 6
+
+ // If two or more conditions apply to the same argument, revert
+ // to the old behavior of adding each condition as a separate rule
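+ // (Example: {arg0 == 1, arg0 != 2} both target index 0, so each
+ // becomes its own rule instead of being ANDed into one rule.)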
+ argCounts := make([]uint, syscallMaxArguments)
+ conditions := []libseccomp.ScmpCondition{}
+
+ for _, cond := range call.Args {
+ newCond, err := toCondition(cond)
+ if err != nil {
+ return errors.Wrapf(err, "create seccomp syscall condition for syscall %s", call.Name)
+ }
+
+ argCounts[cond.Index]++
+
+ conditions = append(conditions, newCond)
+ }
+
+ hasMultipleArgs := false
+ for _, count := range argCounts {
+ if count > 1 {
+ hasMultipleArgs = true
+ break
+ }
+ }
+
+ if hasMultipleArgs {
+ // Revert to old behavior
+ // Add each condition attached to a separate rule
+ for _, cond := range conditions {
+ condArr := []libseccomp.ScmpCondition{cond}
+
+ if err = filter.AddRuleConditional(callNum, callAct, condArr); err != nil {
+ return errors.Wrapf(err, "add seccomp rule for syscall %s", call.Name)
+ }
+ }
+ } else if err = filter.AddRuleConditional(callNum, callAct, conditions); err != nil {
+ // No conditions share same argument
+ // Use new, proper behavior
+ return errors.Wrapf(err, "add seccomp rule for syscall %s", call.Name)
+ }
+ }
+
+ return nil
+}
+
+// toAction converts an internal `Action` type to a `libseccomp.ScmpAction`
+// type.
+func toAction(act Action, errnoRet *uint) (libseccomp.ScmpAction, error) {
+ switch act {
+ case ActKill:
+ return libseccomp.ActKill, nil
+ case ActKillProcess:
+ return libseccomp.ActKillProcess, nil
+ case ActErrno:
+ if errnoRet != nil {
+ return libseccomp.ActErrno.SetReturnCode(int16(*errnoRet)), nil
+ }
+ return libseccomp.ActErrno.SetReturnCode(int16(unix.EPERM)), nil
+ case ActTrap:
+ return libseccomp.ActTrap, nil
+ case ActAllow:
+ return libseccomp.ActAllow, nil
+ case ActTrace:
+ if errnoRet != nil {
+ return libseccomp.ActTrace.SetReturnCode(int16(*errnoRet)), nil
+ }
+ return libseccomp.ActTrace.SetReturnCode(int16(unix.EPERM)), nil
+ case ActLog:
+ return libseccomp.ActLog, nil
+ default:
+ return libseccomp.ActInvalid, errors.Errorf("invalid action %s", act)
+ }
+}
+
+// toCondition converts an internal `Arg` type to a `libseccomp.ScmpCondition`
+// type.
+func toCondition(arg *Arg) (cond libseccomp.ScmpCondition, err error) {
+ if arg == nil {
+ return cond, errors.New("cannot convert nil to syscall condition")
+ }
+
+ op, err := toCompareOp(arg.Op)
+ if err != nil {
+ return cond, errors.Wrap(err, "convert compare operator")
+ }
+
+ condition, err := libseccomp.MakeCondition(
+ arg.Index, op, arg.Value, arg.ValueTwo,
+ )
+ if err != nil {
+ return cond, errors.Wrap(err, "make condition")
+ }
+
+ return condition, nil
+}
+
+// toCompareOp converts an internal `Operator` type to a
+// `libseccomp.ScmpCompareOp`.
+func toCompareOp(op Operator) (libseccomp.ScmpCompareOp, error) {
+ switch op {
+ case OpEqualTo:
+ return libseccomp.CompareEqual, nil
+ case OpNotEqual:
+ return libseccomp.CompareNotEqual, nil
+ case OpGreaterThan:
+ return libseccomp.CompareGreater, nil
+ case OpGreaterEqual:
+ return libseccomp.CompareGreaterEqual, nil
+ case OpLessThan:
+ return libseccomp.CompareLess, nil
+ case OpLessEqual:
+ return libseccomp.CompareLessOrEqual, nil
+ case OpMaskedEqual:
+ return libseccomp.CompareMaskedEqual, nil
+ default:
+ return libseccomp.CompareInvalid, errors.Errorf("invalid operator %s", op)
+ }
+}
diff --git a/vendor/github.com/containers/common/pkg/seccomp/seccomp_linux.go b/vendor/github.com/containers/common/pkg/seccomp/seccomp_linux.go
index 5655a7572..19500cc97 100644
--- a/vendor/github.com/containers/common/pkg/seccomp/seccomp_linux.go
+++ b/vendor/github.com/containers/common/pkg/seccomp/seccomp_linux.go
@@ -122,7 +122,7 @@ Loop:
}
if len(call.Excludes.Caps) > 0 {
for _, c := range call.Excludes.Caps {
- if inSlice(rs.Process.Capabilities.Bounding, c) {
+ if rs != nil && rs.Process != nil && rs.Process.Capabilities != nil && inSlice(rs.Process.Capabilities.Bounding, c) {
continue Loop
}
}
@@ -134,7 +134,7 @@ Loop:
}
if len(call.Includes.Caps) > 0 {
for _, c := range call.Includes.Caps {
- if !inSlice(rs.Process.Capabilities.Bounding, c) {
+ if rs != nil && rs.Process != nil && rs.Process.Capabilities != nil && !inSlice(rs.Process.Capabilities.Bounding, c) {
continue Loop
}
}
diff --git a/vendor/github.com/containers/common/pkg/seccomp/validate.go b/vendor/github.com/containers/common/pkg/seccomp/validate.go
new file mode 100644
index 000000000..1c5c4edc6
--- /dev/null
+++ b/vendor/github.com/containers/common/pkg/seccomp/validate.go
@@ -0,0 +1,29 @@
+// +build seccomp
+
+package seccomp
+
+import (
+ "encoding/json"
+
+ "github.com/pkg/errors"
+)
+
+// ValidateProfile does a basic validation for the provided seccomp profile
+// string.
+func ValidateProfile(content string) error {
+ profile := &Seccomp{}
+ if err := json.Unmarshal([]byte(content), &profile); err != nil {
+ return errors.Wrap(err, "decoding seccomp profile")
+ }
+
+ spec, err := setupSeccomp(profile, nil)
+ if err != nil {
+ return errors.Wrap(err, "create seccomp spec")
+ }
+
+ if _, err := BuildFilter(spec); err != nil {
+ return errors.Wrap(err, "build seccomp filter")
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/containers/common/version/version.go b/vendor/github.com/containers/common/version/version.go
index ef9a947f0..1f05ea3d9 100644
--- a/vendor/github.com/containers/common/version/version.go
+++ b/vendor/github.com/containers/common/version/version.go
@@ -1,4 +1,4 @@
package version
// Version is the version of the build.
-const Version = "0.20.4-dev"
+const Version = "0.21.0"
diff --git a/vendor/github.com/containers/image/v5/copy/copy.go b/vendor/github.com/containers/image/v5/copy/copy.go
index 7482fb458..873bdc67f 100644
--- a/vendor/github.com/containers/image/v5/copy/copy.go
+++ b/vendor/github.com/containers/image/v5/copy/copy.go
@@ -377,7 +377,7 @@ func (c *copier) copyMultipleImages(ctx context.Context, policyContext *signatur
if len(sigs) != 0 {
c.Printf("Checking if image list destination supports signatures\n")
if err := c.dest.SupportsSignatures(ctx); err != nil {
- return nil, "", errors.Wrap(err, "Can not copy signatures")
+ return nil, "", errors.Wrapf(err, "Can not copy signatures to %s", transports.ImageName(c.dest.Reference()))
}
}
canModifyManifestList := (len(sigs) == 0)
@@ -595,7 +595,7 @@ func (c *copier) copyOneImage(ctx context.Context, policyContext *signature.Poli
if len(sigs) != 0 {
c.Printf("Checking if image destination supports signatures\n")
if err := c.dest.SupportsSignatures(ctx); err != nil {
- return nil, "", "", errors.Wrap(err, "Can not copy signatures")
+ return nil, "", "", errors.Wrapf(err, "Can not copy signatures to %s", transports.ImageName(c.dest.Reference()))
}
}
diff --git a/vendor/github.com/containers/image/v5/docker/archive/dest.go b/vendor/github.com/containers/image/v5/docker/archive/dest.go
index 1cf197429..e874e02e0 100644
--- a/vendor/github.com/containers/image/v5/docker/archive/dest.go
+++ b/vendor/github.com/containers/image/v5/docker/archive/dest.go
@@ -3,9 +3,8 @@ package archive
import (
"context"
"io"
- "os"
- "github.com/containers/image/v5/docker/tarfile"
+ "github.com/containers/image/v5/docker/internal/tarfile"
"github.com/containers/image/v5/types"
"github.com/pkg/errors"
)
@@ -13,37 +12,38 @@ import (
type archiveImageDestination struct {
*tarfile.Destination // Implements most of types.ImageDestination
ref archiveReference
- writer io.Closer
+ archive *tarfile.Writer // Should only be closed if writer != nil
+ writer io.Closer // May be nil if the archive is shared
}
func newImageDestination(sys *types.SystemContext, ref archiveReference) (types.ImageDestination, error) {
- // ref.path can be either a pipe or a regular file
- // in the case of a pipe, we require that we can open it for write
- // in the case of a regular file, we don't want to overwrite any pre-existing file
- // so we check for Size() == 0 below (This is racy, but using O_EXCL would also be racy,
- // only in a different way. Either way, it’s up to the user to not have two writers to the same path.)
- fh, err := os.OpenFile(ref.path, os.O_WRONLY|os.O_CREATE, 0644)
- if err != nil {
- return nil, errors.Wrapf(err, "error opening file %q", ref.path)
+ if ref.sourceIndex != -1 {
+ return nil, errors.Errorf("Destination reference must not contain a manifest index @%d", ref.sourceIndex)
}
- fhStat, err := fh.Stat()
- if err != nil {
- return nil, errors.Wrapf(err, "error statting file %q", ref.path)
- }
+ var archive *tarfile.Writer
+ var writer io.Closer
+ if ref.archiveWriter != nil {
+ archive = ref.archiveWriter
+ writer = nil
+ } else {
+ fh, err := openArchiveForWriting(ref.path)
+ if err != nil {
+ return nil, err
+ }
- if fhStat.Mode().IsRegular() && fhStat.Size() != 0 {
- return nil, errors.New("docker-archive doesn't support modifying existing images")
+ archive = tarfile.NewWriter(fh)
+ writer = fh
}
-
- tarDest := tarfile.NewDestinationWithContext(sys, fh, ref.destinationRef)
+ tarDest := tarfile.NewDestination(sys, archive, ref.ref)
if sys != nil && sys.DockerArchiveAdditionalTags != nil {
tarDest.AddRepoTags(sys.DockerArchiveAdditionalTags)
}
return &archiveImageDestination{
Destination: tarDest,
ref: ref,
- writer: fh,
+ archive: archive,
+ writer: writer,
}, nil
}
@@ -60,7 +60,10 @@ func (d *archiveImageDestination) Reference() types.ImageReference {
// Close removes resources associated with an initialized ImageDestination, if any.
func (d *archiveImageDestination) Close() error {
- return d.writer.Close()
+ if d.writer != nil {
+ return d.writer.Close()
+ }
+ return nil
}
// Commit marks the process of storing the image as successful and asks for the image to be persisted.
@@ -68,5 +71,8 @@ func (d *archiveImageDestination) Close() error {
// - Uploaded data MAY be visible to others before Commit() is called
// - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e. rollback is allowed but not guaranteed)
func (d *archiveImageDestination) Commit(ctx context.Context, unparsedToplevel types.UnparsedImage) error {
- return d.Destination.Commit(ctx)
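+ // If the archive is shared (writer == nil), the owner of the
+ // tarfile.Writer is responsible for closing it; Commit then has
+ // nothing to do.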
+ if d.writer != nil {
+ return d.archive.Close()
+ }
+ return nil
}
diff --git a/vendor/github.com/containers/image/v5/docker/archive/reader.go b/vendor/github.com/containers/image/v5/docker/archive/reader.go
new file mode 100644
index 000000000..c7bb311bc
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/docker/archive/reader.go
@@ -0,0 +1,120 @@
+package archive
+
+import (
+ "github.com/containers/image/v5/docker/internal/tarfile"
+ "github.com/containers/image/v5/docker/reference"
+ "github.com/containers/image/v5/transports"
+ "github.com/containers/image/v5/types"
+ "github.com/pkg/errors"
+)
+
+// Reader manages a single Docker archive, allows listing its contents and accessing
+// individual images with less overhead than creating image references individually
+// (because the archive is, if necessary, copied or decompressed only once).
+type Reader struct {
+ path string // The original, user-specified path; not the maintained temporary file, if any
+ archive *tarfile.Reader
+}
+
+// NewReader returns a Reader for path.
+// The caller should call .Close() on the returned object.
+func NewReader(sys *types.SystemContext, path string) (*Reader, error) {
+ archive, err := tarfile.NewReaderFromFile(sys, path)
+ if err != nil {
+ return nil, err
+ }
+ return &Reader{
+ path: path,
+ archive: archive,
+ }, nil
+}
+
+// Close deletes temporary files associated with the Reader, if any.
+func (r *Reader) Close() error {
+ return r.archive.Close()
+}
+
+// NewReaderForReference creates a Reader from a Reader-independent imageReference, which must be from docker/archive.Transport,
+// and a variant of imageReference that points at the same image within the reader.
+// The caller should call .Close() on the returned Reader.
+func NewReaderForReference(sys *types.SystemContext, ref types.ImageReference) (*Reader, types.ImageReference, error) {
+ standalone, ok := ref.(archiveReference)
+ if !ok {
+ return nil, nil, errors.Errorf("Internal error: NewReaderForReference called for a non-docker/archive ImageReference %s", transports.ImageName(ref))
+ }
+ if standalone.archiveReader != nil {
+ return nil, nil, errors.Errorf("Internal error: NewReaderForReference called for a reader-bound reference %s", standalone.StringWithinTransport())
+ }
+ reader, err := NewReader(sys, standalone.path)
+ if err != nil {
+ return nil, nil, err
+ }
+ succeeded := false
+ defer func() {
+ if !succeeded {
+ reader.Close()
+ }
+ }()
+ readerRef, err := newReference(standalone.path, standalone.ref, standalone.sourceIndex, reader.archive, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+ succeeded = true
+ return reader, readerRef, nil
+}
+
+// List returns a set of references for images in the Reader,
+// grouped by the image the references point to.
+// The references are valid only until the Reader is closed.
+func (r *Reader) List() ([][]types.ImageReference, error) {
+ res := [][]types.ImageReference{}
+ for imageIndex, image := range r.archive.Manifest {
+ refs := []types.ImageReference{}
+ for _, tag := range image.RepoTags {
+ parsedTag, err := reference.ParseNormalizedNamed(tag)
+ if err != nil {
+ return nil, errors.Wrapf(err, "Invalid tag %#v in manifest item @%d", tag, imageIndex)
+ }
+ nt, ok := parsedTag.(reference.NamedTagged)
+ if !ok {
+ return nil, errors.Errorf("Invalid tag %s (%s): does not contain a tag", tag, parsedTag.String())
+ }
+ ref, err := newReference(r.path, nt, -1, r.archive, nil)
+ if err != nil {
+ return nil, errors.Wrapf(err, "Error creating a reference for tag %#v in manifest item @%d", tag, imageIndex)
+ }
+ refs = append(refs, ref)
+ }
+ if len(refs) == 0 {
+ ref, err := newReference(r.path, nil, imageIndex, r.archive, nil)
+ if err != nil {
+ return nil, errors.Wrapf(err, "Error creating a reference for manifest item @%d", imageIndex)
+ }
+ refs = append(refs, ref)
+ }
+ res = append(res, refs)
+ }
+ return res, nil
+}
+
+// ManifestTagsForReference returns the set of tags “matching” ref in reader, as strings
+// (i.e. exposing the short names before normalization).
+// The function reports an error if ref does not identify a single image.
+// If ref contains a NamedTagged reference, only a single tag “matching” ref is returned;
+// If ref contains a source index, or neither a NamedTagged nor a source index, all tags
+// matching the image are returned.
+// Almost all users should use List() or ImageReference.DockerReference() instead.
+func (r *Reader) ManifestTagsForReference(ref types.ImageReference) ([]string, error) {
+ archiveRef, ok := ref.(archiveReference)
+ if !ok {
+ return nil, errors.Errorf("Internal error: ManifestTagsForReference called for a non-docker/archive ImageReference %s", transports.ImageName(ref))
+ }
+ manifestItem, tagIndex, err := r.archive.ChooseManifestItem(archiveRef.ref, archiveRef.sourceIndex)
+ if err != nil {
+ return nil, err
+ }
+ if tagIndex != -1 {
+ return []string{manifestItem.RepoTags[tagIndex]}, nil
+ }
+ return manifestItem.RepoTags, nil
+}
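
A hedged usage sketch for the new Reader API: open an archive once, enumerate every image, and print each reference, with one reference group per manifest.json item. The archive path is illustrative and error handling is abbreviated:

    package main

    import (
        "fmt"

        "github.com/containers/image/v5/docker/archive"
        "github.com/containers/image/v5/transports"
    )

    func main() {
        // nil SystemContext uses library defaults; the path is made up.
        reader, err := archive.NewReader(nil, "/tmp/images.tar")
        if err != nil {
            panic(err)
        }
        defer reader.Close()

        images, err := reader.List() // one inner slice per manifest.json item
        if err != nil {
            panic(err)
        }
        for i, refs := range images {
            for _, ref := range refs {
                // Untagged images come back as a single docker-archive:path:@index reference.
                fmt.Printf("image %d: %s\n", i, transports.ImageName(ref))
            }
        }
    }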
diff --git a/vendor/github.com/containers/image/v5/docker/archive/src.go b/vendor/github.com/containers/image/v5/docker/archive/src.go
index 6a628508d..7acca210e 100644
--- a/vendor/github.com/containers/image/v5/docker/archive/src.go
+++ b/vendor/github.com/containers/image/v5/docker/archive/src.go
@@ -3,9 +3,8 @@ package archive
import (
"context"
- "github.com/containers/image/v5/docker/tarfile"
+ "github.com/containers/image/v5/docker/internal/tarfile"
"github.com/containers/image/v5/types"
- "github.com/sirupsen/logrus"
)
type archiveImageSource struct {
@@ -16,13 +15,20 @@ type archiveImageSource struct {
// newImageSource returns a types.ImageSource for the specified image reference.
// The caller must call .Close() on the returned ImageSource.
func newImageSource(ctx context.Context, sys *types.SystemContext, ref archiveReference) (types.ImageSource, error) {
- if ref.destinationRef != nil {
- logrus.Warnf("docker-archive: references are not supported for sources (ignoring)")
- }
- src, err := tarfile.NewSourceFromFileWithContext(sys, ref.path)
- if err != nil {
- return nil, err
+ var archive *tarfile.Reader
+ var closeArchive bool
+ if ref.archiveReader != nil {
+ archive = ref.archiveReader
+ closeArchive = false
+ } else {
+ a, err := tarfile.NewReaderFromFile(sys, ref.path)
+ if err != nil {
+ return nil, err
+ }
+ archive = a
+ closeArchive = true
}
+ src := tarfile.NewSource(archive, closeArchive, ref.ref, ref.sourceIndex)
return &archiveImageSource{
Source: src,
ref: ref,
diff --git a/vendor/github.com/containers/image/v5/docker/archive/transport.go b/vendor/github.com/containers/image/v5/docker/archive/transport.go
index 26bc687e0..ff9e27482 100644
--- a/vendor/github.com/containers/image/v5/docker/archive/transport.go
+++ b/vendor/github.com/containers/image/v5/docker/archive/transport.go
@@ -3,8 +3,10 @@ package archive
import (
"context"
"fmt"
+ "strconv"
"strings"
+ "github.com/containers/image/v5/docker/internal/tarfile"
"github.com/containers/image/v5/docker/reference"
ctrImage "github.com/containers/image/v5/image"
"github.com/containers/image/v5/transports"
@@ -42,9 +44,16 @@ func (t archiveTransport) ValidatePolicyConfigurationScope(scope string) error {
// archiveReference is an ImageReference for Docker images.
type archiveReference struct {
path string
- // only used for destinations,
- // archiveReference.destinationRef is optional and can be nil for destinations as well.
- destinationRef reference.NamedTagged
+ // May be nil to read the only image in an archive, or to create an untagged image.
+ ref reference.NamedTagged
+ // If not -1, a zero-based index of the image in the manifest. Valid only for sources.
+ // Must not be set if ref is set.
+ sourceIndex int
+ // If not nil, must have been created from path (but archiveReader.path may point at a temporary
+ // file, not necessarily path precisely).
+ archiveReader *tarfile.Reader
+ // If not nil, must have been created for path
+ archiveWriter *tarfile.Writer
}
// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into a Docker ImageReference.
@@ -55,37 +64,69 @@ func ParseReference(refString string) (types.ImageReference, error) {
parts := strings.SplitN(refString, ":", 2)
path := parts[0]
- var destinationRef reference.NamedTagged
+ var nt reference.NamedTagged
+ sourceIndex := -1
- // A :tag was specified, which is only necessary for destinations.
if len(parts) == 2 {
- ref, err := reference.ParseNormalizedNamed(parts[1])
- if err != nil {
- return nil, errors.Wrapf(err, "docker-archive parsing reference")
+ // A :tag or :@index was specified.
+ if len(parts[1]) > 0 && parts[1][0] == '@' {
+ i, err := strconv.Atoi(parts[1][1:])
+ if err != nil {
+ return nil, errors.Wrapf(err, "Invalid source index %s", parts[1])
+ }
+ if i < 0 {
+ return nil, errors.Errorf("Invalid source index @%d: must not be negative", i)
+ }
+ sourceIndex = i
+ } else {
+ ref, err := reference.ParseNormalizedNamed(parts[1])
+ if err != nil {
+ return nil, errors.Wrapf(err, "docker-archive parsing reference")
+ }
+ ref = reference.TagNameOnly(ref)
+ refTagged, isTagged := ref.(reference.NamedTagged)
+ if !isTagged { // If ref contains a digest, TagNameOnly does not change it
+ return nil, errors.Errorf("reference does not include a tag: %s", ref.String())
+ }
+ nt = refTagged
}
- ref = reference.TagNameOnly(ref)
- refTagged, isTagged := ref.(reference.NamedTagged)
- if !isTagged {
- // Really shouldn't be hit...
- return nil, errors.Errorf("internal error: reference is not tagged even after reference.TagNameOnly: %s", refString)
- }
- destinationRef = refTagged
}
- return NewReference(path, destinationRef)
+ return newReference(path, nt, sourceIndex, nil, nil)
+}
+
+// NewReference returns a Docker archive reference for a path and an optional reference.
+func NewReference(path string, ref reference.NamedTagged) (types.ImageReference, error) {
+ return newReference(path, ref, -1, nil, nil)
}
-// NewReference rethrns a Docker archive reference for a path and an optional destination reference.
-func NewReference(path string, destinationRef reference.NamedTagged) (types.ImageReference, error) {
+// NewIndexReference returns a Docker archive reference for a path and a zero-based source manifest index.
+func NewIndexReference(path string, sourceIndex int) (types.ImageReference, error) {
+ return newReference(path, nil, sourceIndex, nil, nil)
+}
+
+// newReference returns a docker archive reference for a path, an optional reference or sourceIndex,
+// and optionally a tarfile.Reader and/or a tarfile.Writer matching path.
+func newReference(path string, ref reference.NamedTagged, sourceIndex int,
+ archiveReader *tarfile.Reader, archiveWriter *tarfile.Writer) (types.ImageReference, error) {
if strings.Contains(path, ":") {
return nil, errors.Errorf("Invalid docker-archive: reference: colon in path %q is not supported", path)
}
- if _, isDigest := destinationRef.(reference.Canonical); isDigest {
- return nil, errors.Errorf("docker-archive doesn't support digest references: %s", destinationRef.String())
+ if ref != nil && sourceIndex != -1 {
+ return nil, errors.Errorf("Invalid docker-archive: reference: cannot use both a tag and a source index")
+ }
+ if _, isDigest := ref.(reference.Canonical); isDigest {
+ return nil, errors.Errorf("docker-archive doesn't support digest references: %s", ref.String())
+ }
+ if sourceIndex != -1 && sourceIndex < 0 {
+ return nil, errors.Errorf("Invalid docker-archive: reference: index @%d must not be negative", sourceIndex)
}
return archiveReference{
- path: path,
- destinationRef: destinationRef,
+ path: path,
+ ref: ref,
+ sourceIndex: sourceIndex,
+ archiveReader: archiveReader,
+ archiveWriter: archiveWriter,
}, nil
}
@@ -99,17 +140,21 @@ func (ref archiveReference) Transport() types.ImageTransport {
// e.g. default attribute values omitted by the user may be filled in in the return value, or vice versa.
// WARNING: Do not use the return value in the UI to describe an image, it does not contain the Transport().Name() prefix.
func (ref archiveReference) StringWithinTransport() string {
- if ref.destinationRef == nil {
+ switch {
+ case ref.ref != nil:
+ return fmt.Sprintf("%s:%s", ref.path, ref.ref.String())
+ case ref.sourceIndex != -1:
+ return fmt.Sprintf("%s:@%d", ref.path, ref.sourceIndex)
+ default:
return ref.path
}
- return fmt.Sprintf("%s:%s", ref.path, ref.destinationRef.String())
}
// DockerReference returns a Docker reference associated with this reference
// (fully explicit, i.e. !reference.IsNameOnly, but reflecting user intent,
// not e.g. after redirect or alias processing), or nil if unknown/not applicable.
func (ref archiveReference) DockerReference() reference.Named {
- return ref.destinationRef
+ return ref.ref
}
// PolicyConfigurationIdentity returns a string representation of the reference, suitable for policy lookup.
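
The reference grammar after this change, illustrated with the public ParseReference (paths are made up): a bare path means "the only image in the archive", path:tag selects by normalized tag, and path:@N selects the N-th manifest.json entry:

    package main

    import (
        "fmt"

        "github.com/containers/image/v5/docker/archive"
    )

    func main() {
        for _, s := range []string{
            "/tmp/img.tar",                // the only image in the archive
            "/tmp/img.tar:busybox:latest", // select by (normalized) tag
            "/tmp/img.tar:@0",             // select by zero-based source index
        } {
            ref, err := archive.ParseReference(s)
            if err != nil {
                fmt.Println(s, "->", err)
                continue
            }
            fmt.Println(s, "->", ref.StringWithinTransport())
        }
    }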
diff --git a/vendor/github.com/containers/image/v5/docker/archive/writer.go b/vendor/github.com/containers/image/v5/docker/archive/writer.go
new file mode 100644
index 000000000..afac2aaee
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/docker/archive/writer.go
@@ -0,0 +1,82 @@
+package archive
+
+import (
+ "io"
+ "os"
+
+ "github.com/containers/image/v5/docker/internal/tarfile"
+ "github.com/containers/image/v5/docker/reference"
+ "github.com/containers/image/v5/types"
+ "github.com/pkg/errors"
+)
+
+// Writer manages a single in-progress Docker archive and allows adding images to it.
+type Writer struct {
+ path string // The original, user-specified path; not the maintained temporary file, if any
+ archive *tarfile.Writer
+ writer io.Closer
+}
+
+// NewWriter returns a Writer for path.
+// The caller should call .Close() on the returned object.
+func NewWriter(sys *types.SystemContext, path string) (*Writer, error) {
+ fh, err := openArchiveForWriting(path)
+ if err != nil {
+ return nil, err
+ }
+ archive := tarfile.NewWriter(fh)
+
+ return &Writer{
+ path: path,
+ archive: archive,
+ writer: fh,
+ }, nil
+}
+
+// Close writes all outstanding data about images to the archive, and
+// releases state associated with the Writer, if any.
+// No more images can be added after this is called.
+func (w *Writer) Close() error {
+ err := w.archive.Close()
+ if err2 := w.writer.Close(); err2 != nil && err == nil {
+ err = err2
+ }
+ return err
+}
+
+// NewReference returns an ImageReference that allows adding an image to Writer,
+// with an optional reference.
+func (w *Writer) NewReference(destinationRef reference.NamedTagged) (types.ImageReference, error) {
+ return newReference(w.path, destinationRef, -1, nil, w.archive)
+}
+
+// openArchiveForWriting opens path for writing a tar archive,
+// making a few sanity checks.
+func openArchiveForWriting(path string) (*os.File, error) {
+ // path can be either a pipe or a regular file
+ // in the case of a pipe, we require that we can open it for write
+ // in the case of a regular file, we don't want to overwrite any pre-existing file
+ // so we check for Size() == 0 below (This is racy, but using O_EXCL would also be racy,
+ // only in a different way. Either way, it’s up to the user to not have two writers to the same path.)
+ fh, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE, 0644)
+ if err != nil {
+ return nil, errors.Wrapf(err, "error opening file %q", path)
+ }
+ succeeded := false
+ defer func() {
+ if !succeeded {
+ fh.Close()
+ }
+ }()
+ fhStat, err := fh.Stat()
+ if err != nil {
+ return nil, errors.Wrapf(err, "error statting file %q", path)
+ }
+
+ if fhStat.Mode().IsRegular() && fhStat.Size() != 0 {
+ return nil, errors.New("docker-archive doesn't support modifying existing images")
+ }
+
+ succeeded = true
+ return fh, nil
+}
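
A sketch of the intended multi-image flow with the new public Writer. The image name and path are made up, and the actual copy step, normally a copy.Image call pointed at the returned reference, is elided:

    package main

    import (
        "fmt"

        "github.com/containers/image/v5/docker/archive"
        "github.com/containers/image/v5/docker/reference"
    )

    func main() {
        w, err := archive.NewWriter(nil, "/tmp/multi.tar") // nil SystemContext; path is illustrative
        if err != nil {
            panic(err)
        }
        defer w.Close() // finalizes manifest.json; no images can be added afterwards

        named, err := reference.ParseNormalizedNamed("example.com/app:v1")
        if err != nil {
            panic(err)
        }
        tagged := named.(reference.NamedTagged) // safe here: the string carries an explicit tag
        dest, err := w.NewReference(tagged)
        if err != nil {
            panic(err)
        }
        // In a real flow, copy.Image would be pointed at dest to add the image.
        fmt.Println("destination:", dest.StringWithinTransport())
    }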
diff --git a/vendor/github.com/containers/image/v5/docker/daemon/daemon_dest.go b/vendor/github.com/containers/image/v5/docker/daemon/daemon_dest.go
index c6afd4bde..88609b3dc 100644
--- a/vendor/github.com/containers/image/v5/docker/daemon/daemon_dest.go
+++ b/vendor/github.com/containers/image/v5/docker/daemon/daemon_dest.go
@@ -4,8 +4,8 @@ import (
"context"
"io"
+ "github.com/containers/image/v5/docker/internal/tarfile"
"github.com/containers/image/v5/docker/reference"
- "github.com/containers/image/v5/docker/tarfile"
"github.com/containers/image/v5/types"
"github.com/docker/docker/client"
"github.com/pkg/errors"
@@ -16,6 +16,7 @@ type daemonImageDestination struct {
ref daemonReference
mustMatchRuntimeOS bool
*tarfile.Destination // Implements most of types.ImageDestination
+ archive *tarfile.Writer
// For talking to imageLoadGoroutine
goroutineCancel context.CancelFunc
statusChannel <-chan error
@@ -45,6 +46,7 @@ func newImageDestination(ctx context.Context, sys *types.SystemContext, ref daem
}
reader, writer := io.Pipe()
+ archive := tarfile.NewWriter(writer)
// Commit() may never be called, so we may never read from this channel; so, make this buffered to allow imageLoadGoroutine to write status and terminate even if we never read it.
statusChannel := make(chan error, 1)
@@ -54,7 +56,8 @@ func newImageDestination(ctx context.Context, sys *types.SystemContext, ref daem
return &daemonImageDestination{
ref: ref,
mustMatchRuntimeOS: mustMatchRuntimeOS,
- Destination: tarfile.NewDestinationWithContext(sys, writer, namedTaggedRef),
+ Destination: tarfile.NewDestination(sys, archive, namedTaggedRef),
+ archive: archive,
goroutineCancel: goroutineCancel,
statusChannel: statusChannel,
writer: writer,
@@ -130,7 +133,7 @@ func (d *daemonImageDestination) Reference() types.ImageReference {
// - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e. rollback is allowed but not guaranteed)
func (d *daemonImageDestination) Commit(ctx context.Context, unparsedToplevel types.UnparsedImage) error {
logrus.Debugf("docker-daemon: Closing tar stream")
- if err := d.Destination.Commit(ctx); err != nil {
+ if err := d.archive.Close(); err != nil {
return err
}
if err := d.writer.Close(); err != nil {
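
The daemon destination keeps its existing io.Pipe plumbing and only swaps in the new tarfile.Writer. In miniature, the pattern is: write the tar stream into one end of a pipe while a goroutine drains the other end into the daemon, reporting the outcome over a buffered channel. A standalone sketch, with io.Discard standing in for client.ImageLoad:

    package main

    import (
        "fmt"
        "io"
    )

    func main() {
        pr, pw := io.Pipe()
        status := make(chan error, 1) // buffered: the goroutine can finish even if nobody reads it

        go func() {
            n, err := io.Copy(io.Discard, pr) // stand-in for the daemon consuming the tar stream
            fmt.Println("consumed", n, "bytes")
            status <- err
        }()

        if _, err := pw.Write([]byte("tar stream bytes")); err != nil { // stand-in for tarfile.Writer output
            panic(err)
        }
        pw.Close()            // Commit(): end the stream so the consumer sees EOF
        fmt.Println(<-status) // then wait for the goroutine's verdict
    }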
diff --git a/vendor/github.com/containers/image/v5/docker/daemon/daemon_src.go b/vendor/github.com/containers/image/v5/docker/daemon/daemon_src.go
index 1827f811d..74a678817 100644
--- a/vendor/github.com/containers/image/v5/docker/daemon/daemon_src.go
+++ b/vendor/github.com/containers/image/v5/docker/daemon/daemon_src.go
@@ -3,7 +3,7 @@ package daemon
import (
"context"
- "github.com/containers/image/v5/docker/tarfile"
+ "github.com/containers/image/v5/docker/internal/tarfile"
"github.com/containers/image/v5/types"
"github.com/pkg/errors"
)
@@ -35,10 +35,11 @@ func newImageSource(ctx context.Context, sys *types.SystemContext, ref daemonRef
}
defer inputStream.Close()
- src, err := tarfile.NewSourceFromStreamWithSystemContext(sys, inputStream)
+ archive, err := tarfile.NewReaderFromStream(sys, inputStream)
if err != nil {
return nil, err
}
+ src := tarfile.NewSource(archive, true, nil, -1)
return &daemonImageSource{
ref: ref,
Source: src,
diff --git a/vendor/github.com/containers/image/v5/docker/docker_image_dest.go b/vendor/github.com/containers/image/v5/docker/docker_image_dest.go
index 979100ee3..576dec495 100644
--- a/vendor/github.com/containers/image/v5/docker/docker_image_dest.go
+++ b/vendor/github.com/containers/image/v5/docker/docker_image_dest.go
@@ -22,7 +22,6 @@ import (
"github.com/containers/image/v5/types"
"github.com/docker/distribution/registry/api/errcode"
v2 "github.com/docker/distribution/registry/api/v2"
- "github.com/docker/distribution/registry/client"
"github.com/opencontainers/go-digest"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
@@ -154,7 +153,7 @@ func (d *dockerImageDestination) PutBlob(ctx context.Context, stream io.Reader,
defer res.Body.Close()
if res.StatusCode != http.StatusAccepted {
logrus.Debugf("Error initiating layer upload, response %#v", *res)
- return types.BlobInfo{}, errors.Wrapf(client.HandleErrorResponse(res), "Error initiating layer upload to %s in %s", uploadPath, d.c.registry)
+ return types.BlobInfo{}, errors.Wrapf(registryHTTPResponseToError(res), "Error initiating layer upload to %s in %s", uploadPath, d.c.registry)
}
uploadLocation, err := res.Location()
if err != nil {
@@ -175,7 +174,7 @@ func (d *dockerImageDestination) PutBlob(ctx context.Context, stream io.Reader,
}
defer res.Body.Close()
if !successStatus(res.StatusCode) {
- return nil, errors.Wrapf(client.HandleErrorResponse(res), "Error uploading layer chunked")
+ return nil, errors.Wrapf(registryHTTPResponseToError(res), "Error uploading layer chunked")
}
uploadLocation, err := res.Location()
if err != nil {
@@ -201,7 +200,7 @@ func (d *dockerImageDestination) PutBlob(ctx context.Context, stream io.Reader,
defer res.Body.Close()
if res.StatusCode != http.StatusCreated {
logrus.Debugf("Error uploading layer, response %#v", *res)
- return types.BlobInfo{}, errors.Wrapf(client.HandleErrorResponse(res), "Error uploading layer to %s", uploadLocation)
+ return types.BlobInfo{}, errors.Wrapf(registryHTTPResponseToError(res), "Error uploading layer to %s", uploadLocation)
}
logrus.Debugf("Upload of layer %s complete", computedDigest)
@@ -226,7 +225,7 @@ func (d *dockerImageDestination) blobExists(ctx context.Context, repo reference.
return true, getBlobSize(res), nil
case http.StatusUnauthorized:
logrus.Debugf("... not authorized")
- return false, -1, errors.Wrapf(client.HandleErrorResponse(res), "Error checking whether a blob %s exists in %s", digest, repo.Name())
+ return false, -1, errors.Wrapf(registryHTTPResponseToError(res), "Error checking whether a blob %s exists in %s", digest, repo.Name())
case http.StatusNotFound:
logrus.Debugf("... not present")
return false, -1, nil
@@ -277,7 +276,7 @@ func (d *dockerImageDestination) mountBlob(ctx context.Context, srcRepo referenc
return fmt.Errorf("Mounting %s from %s to %s started an upload instead", srcDigest, srcRepo.Name(), d.ref.ref.Name())
default:
logrus.Debugf("Error mounting, response %#v", *res)
- return errors.Wrapf(client.HandleErrorResponse(res), "Error mounting %s from %s to %s", srcDigest, srcRepo.Name(), d.ref.ref.Name())
+ return errors.Wrapf(registryHTTPResponseToError(res), "Error mounting %s from %s to %s", srcDigest, srcRepo.Name(), d.ref.ref.Name())
}
}
@@ -414,7 +413,7 @@ func (d *dockerImageDestination) PutManifest(ctx context.Context, m []byte, inst
}
defer res.Body.Close()
if !successStatus(res.StatusCode) {
- err = errors.Wrapf(client.HandleErrorResponse(res), "Error uploading manifest %s to %s", refTail, d.ref.ref.Name())
+ err = errors.Wrapf(registryHTTPResponseToError(res), "Error uploading manifest %s to %s", refTail, d.ref.ref.Name())
if isManifestInvalidError(errors.Cause(err)) {
err = types.ManifestTypeRejectedError{Err: err}
}
@@ -641,7 +640,7 @@ sigExists:
logrus.Debugf("Error body %s", string(body))
}
logrus.Debugf("Error uploading signature, status %d, %#v", res.StatusCode, res)
- return errors.Wrapf(client.HandleErrorResponse(res), "Error uploading signature to %s in %s", path, d.c.registry)
+ return errors.Wrapf(registryHTTPResponseToError(res), "Error uploading signature to %s in %s", path, d.c.registry)
}
}
diff --git a/vendor/github.com/containers/image/v5/docker/docker_image_src.go b/vendor/github.com/containers/image/v5/docker/docker_image_src.go
index 55eb38824..4d2a9ed6c 100644
--- a/vendor/github.com/containers/image/v5/docker/docker_image_src.go
+++ b/vendor/github.com/containers/image/v5/docker/docker_image_src.go
@@ -17,7 +17,6 @@ import (
"github.com/containers/image/v5/manifest"
"github.com/containers/image/v5/pkg/sysregistriesv2"
"github.com/containers/image/v5/types"
- "github.com/docker/distribution/registry/client"
digest "github.com/opencontainers/go-digest"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
@@ -193,7 +192,7 @@ func (s *dockerImageSource) fetchManifest(ctx context.Context, tagOrDigest strin
logrus.Debugf("Content-Type from manifest GET is %q", res.Header.Get("Content-Type"))
defer res.Body.Close()
if res.StatusCode != http.StatusOK {
- return nil, "", errors.Wrapf(client.HandleErrorResponse(res), "Error reading manifest %s in %s", tagOrDigest, s.physicalRef.ref.Name())
+ return nil, "", errors.Wrapf(registryHTTPResponseToError(res), "Error reading manifest %s in %s", tagOrDigest, s.physicalRef.ref.Name())
}
manblob, err := iolimits.ReadAtMost(res.Body, iolimits.MaxManifestBodySize)
@@ -235,6 +234,9 @@ func (s *dockerImageSource) getExternalBlob(ctx context.Context, urls []string)
resp *http.Response
err error
)
+ if len(urls) == 0 {
+ return nil, 0, errors.New("internal error: getExternalBlob called with no URLs")
+ }
for _, url := range urls {
resp, err = s.c.makeRequestToResolvedURL(ctx, "GET", url, nil, nil, -1, noAuth, nil)
if err == nil {
diff --git a/vendor/github.com/containers/image/v5/docker/errors.go b/vendor/github.com/containers/image/v5/docker/errors.go
index f626cc7da..5b5008af7 100644
--- a/vendor/github.com/containers/image/v5/docker/errors.go
+++ b/vendor/github.com/containers/image/v5/docker/errors.go
@@ -44,3 +44,17 @@ func httpResponseToError(res *http.Response, context string) error {
return perrors.Errorf("%sinvalid status code from registry %d (%s)", context, res.StatusCode, http.StatusText(res.StatusCode))
}
}
+
+// registryHTTPResponseToError creates a Go error from an HTTP error response of a docker/distribution
+// registry
+func registryHTTPResponseToError(res *http.Response) error {
+ errResponse := client.HandleErrorResponse(res)
+ if e, ok := perrors.Cause(errResponse).(*client.UnexpectedHTTPResponseError); ok {
+ response := string(e.Response)
+ if len(response) > 50 {
+ response = response[:50] + "..."
+ }
+ errResponse = fmt.Errorf("StatusCode: %d, %s", e.StatusCode, response)
+ }
+ return errResponse
+}
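
The truncation rule above is easy to state on its own: error bodies longer than 50 bytes are cut and suffixed with an ellipsis, so that, for example, an HTML error page from a proxy does not flood logs. A minimal sketch of just that rule (the helper name is illustrative):

    package main

    import "fmt"

    // truncateBody mirrors the rule above; 50 is the same cutoff.
    func truncateBody(statusCode int, body string) string {
        if len(body) > 50 {
            body = body[:50] + "..."
        }
        return fmt.Sprintf("StatusCode: %d, %s", statusCode, body)
    }

    func main() {
        long := "<html><body>upstream proxy produced a very long error page</body></html>"
        fmt.Println(truncateBody(502, long))
    }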
diff --git a/vendor/github.com/containers/image/v5/docker/internal/tarfile/dest.go b/vendor/github.com/containers/image/v5/docker/internal/tarfile/dest.go
new file mode 100644
index 000000000..8c38094cf
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/docker/internal/tarfile/dest.go
@@ -0,0 +1,217 @@
+package tarfile
+
+import (
+ "bytes"
+ "context"
+ "encoding/json"
+ "io"
+ "io/ioutil"
+ "os"
+
+ "github.com/containers/image/v5/docker/reference"
+ "github.com/containers/image/v5/internal/iolimits"
+ "github.com/containers/image/v5/internal/tmpdir"
+ "github.com/containers/image/v5/manifest"
+ "github.com/containers/image/v5/types"
+ "github.com/opencontainers/go-digest"
+ "github.com/pkg/errors"
+ "github.com/sirupsen/logrus"
+)
+
+// Destination is a partial implementation of types.ImageDestination for writing to an io.Writer.
+type Destination struct {
+ archive *Writer
+ repoTags []reference.NamedTagged
+ // Other state.
+ config []byte
+ sysCtx *types.SystemContext
+}
+
+// NewDestination returns a tarfile.Destination adding images to the specified Writer.
+func NewDestination(sys *types.SystemContext, archive *Writer, ref reference.NamedTagged) *Destination {
+ repoTags := []reference.NamedTagged{}
+ if ref != nil {
+ repoTags = append(repoTags, ref)
+ }
+ return &Destination{
+ archive: archive,
+ repoTags: repoTags,
+ sysCtx: sys,
+ }
+}
+
+// AddRepoTags adds the specified tags to the destination's repoTags.
+func (d *Destination) AddRepoTags(tags []reference.NamedTagged) {
+ d.repoTags = append(d.repoTags, tags...)
+}
+
+// SupportedManifestMIMETypes tells which manifest MIME types the destination supports.
+// If an empty slice or nil is returned, then any MIME type can be tried for upload.
+func (d *Destination) SupportedManifestMIMETypes() []string {
+ return []string{
+ manifest.DockerV2Schema2MediaType, // We rely on the types.Image.UpdatedImage schema conversion capabilities.
+ }
+}
+
+// SupportsSignatures returns an error (to be displayed to the user) if the destination certainly can't store signatures.
+// Note: It is still possible for PutSignatures to fail if SupportsSignatures returns nil.
+func (d *Destination) SupportsSignatures(ctx context.Context) error {
+ return errors.Errorf("Storing signatures for docker tar files is not supported")
+}
+
+// AcceptsForeignLayerURLs returns false iff foreign layers in the manifest should actually be
+// uploaded to the image destination, true otherwise.
+func (d *Destination) AcceptsForeignLayerURLs() bool {
+ return false
+}
+
+// MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime architecture and OS. False otherwise.
+func (d *Destination) MustMatchRuntimeOS() bool {
+ return false
+}
+
+// IgnoresEmbeddedDockerReference returns true iff the destination does not care about Image.EmbeddedDockerReferenceConflicts(),
+// and would prefer to receive an unmodified manifest instead of one modified for the destination.
+// Does not make a difference if Reference().DockerReference() is nil.
+func (d *Destination) IgnoresEmbeddedDockerReference() bool {
+ return false // N/A, we only accept schema2 images where EmbeddedDockerReferenceConflicts() is always false.
+}
+
+// HasThreadSafePutBlob indicates whether PutBlob can be executed concurrently.
+func (d *Destination) HasThreadSafePutBlob() bool {
+ // The code _is_ actually thread-safe, but apart from computing sizes/digests of layers where
+ // this is unknown in advance, the actual copy is serialized by d.archive, so there probably isn’t
+ // much benefit from concurrency, mostly just extra CPU, memory and I/O contention.
+ return false
+}
+
+// PutBlob writes contents of stream and returns data representing the result (with all data filled in).
+// inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it.
+// inputInfo.Size is the expected length of stream, if known.
+// May update cache.
+// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
+// to any other readers for download using the supplied digest.
+// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
+func (d *Destination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) {
+ // Ouch, we need to stream the blob into a temporary file just to determine the size.
+ // When the layer is decompressed, we also have to generate the digest on the uncompressed data.
+ if inputInfo.Size == -1 || inputInfo.Digest.String() == "" {
+ logrus.Debugf("docker tarfile: input with unknown size, streaming to disk first ...")
+ streamCopy, err := ioutil.TempFile(tmpdir.TemporaryDirectoryForBigFiles(d.sysCtx), "docker-tarfile-blob")
+ if err != nil {
+ return types.BlobInfo{}, err
+ }
+ defer os.Remove(streamCopy.Name())
+ defer streamCopy.Close()
+
+ digester := digest.Canonical.Digester()
+ tee := io.TeeReader(stream, digester.Hash())
+ // TODO: This can take quite some time, and should ideally be cancellable using ctx.Done().
+ size, err := io.Copy(streamCopy, tee)
+ if err != nil {
+ return types.BlobInfo{}, err
+ }
+ _, err = streamCopy.Seek(0, io.SeekStart)
+ if err != nil {
+ return types.BlobInfo{}, err
+ }
+ inputInfo.Size = size // inputInfo is a struct, so we are only modifying our copy.
+ if inputInfo.Digest == "" {
+ inputInfo.Digest = digester.Digest()
+ }
+ stream = streamCopy
+ logrus.Debugf("... streaming done")
+ }
+
+ if err := d.archive.lock(); err != nil {
+ return types.BlobInfo{}, err
+ }
+ defer d.archive.unlock()
+
+ // Maybe the blob has been already sent
+ ok, reusedInfo, err := d.archive.tryReusingBlobLocked(inputInfo)
+ if err != nil {
+ return types.BlobInfo{}, err
+ }
+ if ok {
+ return reusedInfo, nil
+ }
+
+ if isConfig {
+ buf, err := iolimits.ReadAtMost(stream, iolimits.MaxConfigBodySize)
+ if err != nil {
+ return types.BlobInfo{}, errors.Wrap(err, "Error reading Config file stream")
+ }
+ d.config = buf
+ if err := d.archive.sendFileLocked(d.archive.configPath(inputInfo.Digest), inputInfo.Size, bytes.NewReader(buf)); err != nil {
+ return types.BlobInfo{}, errors.Wrap(err, "Error writing Config file")
+ }
+ } else {
+ if err := d.archive.sendFileLocked(d.archive.physicalLayerPath(inputInfo.Digest), inputInfo.Size, stream); err != nil {
+ return types.BlobInfo{}, err
+ }
+ }
+ d.archive.recordBlobLocked(types.BlobInfo{Digest: inputInfo.Digest, Size: inputInfo.Size})
+ return types.BlobInfo{Digest: inputInfo.Digest, Size: inputInfo.Size}, nil
+}
+
+// TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
+// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
+// info.Digest must not be empty.
+// If canSubstitute, TryReusingBlob can use an equivalent of the desired blob; in that case the returned info may not match the input.
+// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size.
+// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
+// May use and/or update cache.
+func (d *Destination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) {
+ if err := d.archive.lock(); err != nil {
+ return false, types.BlobInfo{}, err
+ }
+ defer d.archive.unlock()
+
+ return d.archive.tryReusingBlobLocked(info)
+}
+
+// PutManifest writes manifest to the destination.
+// The instanceDigest value is expected to always be nil, because this transport does not support manifest lists, so
+// there can be no secondary manifests.
+// FIXME? This should also receive a MIME type if known, to differentiate between schema versions.
+// If the destination is in principle available, refuses this manifest type (e.g. it does not recognize the schema),
+// but may accept a different manifest type, the returned error must be an ManifestTypeRejectedError.
+func (d *Destination) PutManifest(ctx context.Context, m []byte, instanceDigest *digest.Digest) error {
+ if instanceDigest != nil {
+ return errors.New(`Manifest lists are not supported for docker tar files`)
+ }
+ // We do not bother with types.ManifestTypeRejectedError; our .SupportedManifestMIMETypes() above is already providing only one alternative,
+ // so the caller trying a different manifest kind would be pointless.
+ var man manifest.Schema2
+ if err := json.Unmarshal(m, &man); err != nil {
+ return errors.Wrap(err, "Error parsing manifest")
+ }
+ if man.SchemaVersion != 2 || man.MediaType != manifest.DockerV2Schema2MediaType {
+ return errors.Errorf("Unsupported manifest type, need a Docker schema 2 manifest")
+ }
+
+ if err := d.archive.lock(); err != nil {
+ return err
+ }
+ defer d.archive.unlock()
+
+ if err := d.archive.writeLegacyMetadataLocked(man.LayersDescriptors, d.config, d.repoTags); err != nil {
+ return err
+ }
+
+ return d.archive.ensureManifestItemLocked(man.LayersDescriptors, man.ConfigDescriptor.Digest, d.repoTags)
+}
+
+// PutSignatures would add the given signatures to the docker tarfile (currently not supported).
+// The instanceDigest value is expected to always be nil, because this transport does not support manifest lists, so
+// there can be no secondary manifests. MUST be called after PutManifest (signatures reference manifest contents).
+func (d *Destination) PutSignatures(ctx context.Context, signatures [][]byte, instanceDigest *digest.Digest) error {
+ if instanceDigest != nil {
+ return errors.Errorf(`Manifest lists are not supported for docker tar files`)
+ }
+ if len(signatures) != 0 {
+ return errors.Errorf("Storing signatures for docker tar files is not supported")
+ }
+ return nil
+}
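
PutBlob's unknown-size path above uses a spool-and-digest pattern: tee the stream into a digester while copying it to a temporary file, learn the size from the copy, then replay the file as the blob stream. A standalone sketch of that pattern (the helper name is illustrative; the caller owns cleanup of the temp file):

    package main

    import (
        "fmt"
        "io"
        "os"
        "strings"

        "github.com/opencontainers/go-digest"
    )

    // spool copies stream to a temp file while hashing it, returning a replayable
    // reader plus the now-known size and digest.
    func spool(stream io.Reader) (*os.File, int64, digest.Digest, error) {
        tmp, err := os.CreateTemp("", "blob")
        if err != nil {
            return nil, -1, "", err
        }
        digester := digest.Canonical.Digester()
        size, err := io.Copy(tmp, io.TeeReader(stream, digester.Hash()))
        if err != nil {
            return nil, -1, "", err
        }
        if _, err := tmp.Seek(0, io.SeekStart); err != nil {
            return nil, -1, "", err
        }
        return tmp, size, digester.Digest(), nil
    }

    func main() {
        tmp, size, dgst, err := spool(strings.NewReader("layer bytes"))
        if err != nil {
            panic(err)
        }
        defer os.Remove(tmp.Name())
        defer tmp.Close()
        fmt.Println(size, dgst)
    }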
diff --git a/vendor/github.com/containers/image/v5/docker/internal/tarfile/reader.go b/vendor/github.com/containers/image/v5/docker/internal/tarfile/reader.go
new file mode 100644
index 000000000..83de0c520
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/docker/internal/tarfile/reader.go
@@ -0,0 +1,269 @@
+package tarfile
+
+import (
+ "archive/tar"
+ "encoding/json"
+ "io"
+ "io/ioutil"
+ "os"
+ "path"
+
+ "github.com/containers/image/v5/docker/reference"
+ "github.com/containers/image/v5/internal/iolimits"
+ "github.com/containers/image/v5/internal/tmpdir"
+ "github.com/containers/image/v5/pkg/compression"
+ "github.com/containers/image/v5/types"
+ "github.com/pkg/errors"
+)
+
+// Reader is a ((docker save)-formatted) tar archive that allows random access to any component.
+type Reader struct {
+ // None of the fields below are modified after the archive is created, until .Close();
+ // this allows concurrent readers of the same archive.
+ path string // "" if the archive has already been closed.
+ removeOnClose bool // Remove file on close if true
+ Manifest []ManifestItem // Guaranteed to exist after the archive is created.
+}
+
+// NewReaderFromFile returns a Reader for the specified path.
+// The caller should call .Close() on the returned archive when done.
+func NewReaderFromFile(sys *types.SystemContext, path string) (*Reader, error) {
+ file, err := os.Open(path)
+ if err != nil {
+ return nil, errors.Wrapf(err, "error opening file %q", path)
+ }
+ defer file.Close()
+
+ // If the file is already not compressed we can just return the file itself
+ // as a source. Otherwise we pass the stream to NewReaderFromStream.
+ stream, isCompressed, err := compression.AutoDecompress(file)
+ if err != nil {
+ return nil, errors.Wrapf(err, "Error detecting compression for file %q", path)
+ }
+ defer stream.Close()
+ if !isCompressed {
+ return newReader(path, false)
+ }
+ return NewReaderFromStream(sys, stream)
+}
+
+// NewReaderFromStream returns a Reader for the specified inputStream,
+// which can be either compressed or uncompressed. The caller can close the
+// inputStream immediately after NewReaderFromFile returns.
+// The caller should call .Close() on the returned archive when done.
+func NewReaderFromStream(sys *types.SystemContext, inputStream io.Reader) (*Reader, error) {
+ // Save inputStream to a temporary file
+ tarCopyFile, err := ioutil.TempFile(tmpdir.TemporaryDirectoryForBigFiles(sys), "docker-tar")
+ if err != nil {
+ return nil, errors.Wrap(err, "error creating temporary file")
+ }
+ defer tarCopyFile.Close()
+
+ succeeded := false
+ defer func() {
+ if !succeeded {
+ os.Remove(tarCopyFile.Name())
+ }
+ }()
+
+ // In order to be compatible with docker-load, we need to support
+ // auto-decompression (it's also a nice quality-of-life thing to avoid
+ // giving users really confusing "invalid tar header" errors).
+ uncompressedStream, _, err := compression.AutoDecompress(inputStream)
+ if err != nil {
+ return nil, errors.Wrap(err, "Error auto-decompressing input")
+ }
+ defer uncompressedStream.Close()
+
+ // Copy the plain archive to the temporary file.
+ //
+ // TODO: This can take quite some time, and should ideally be cancellable
+ // using a context.Context.
+ if _, err := io.Copy(tarCopyFile, uncompressedStream); err != nil {
+ return nil, errors.Wrapf(err, "error copying contents to temporary file %q", tarCopyFile.Name())
+ }
+ succeeded = true
+
+ return newReader(tarCopyFile.Name(), true)
+}
+
+// newReader creates a Reader for the specified path and removeOnClose flag.
+// The caller should call .Close() on the returned archive when done.
+func newReader(path string, removeOnClose bool) (*Reader, error) {
+ // This is a valid enough archive, except Manifest is not yet filled.
+ r := Reader{
+ path: path,
+ removeOnClose: removeOnClose,
+ }
+ succeeded := false
+ defer func() {
+ if !succeeded {
+ r.Close()
+ }
+ }()
+
+ // We initialize Manifest immediately when constructing the Reader instead
+ // of later on-demand because every caller will need the data, and because doing it now
+ // removes the need to synchronize the access/creation of the data if the archive is later
+ // used from multiple goroutines to access different images.
+
+ // FIXME? Do we need to deal with the legacy format?
+ bytes, err := r.readTarComponent(manifestFileName, iolimits.MaxTarFileManifestSize)
+ if err != nil {
+ return nil, err
+ }
+ if err := json.Unmarshal(bytes, &r.Manifest); err != nil {
+ return nil, errors.Wrap(err, "Error decoding tar manifest.json")
+ }
+
+ succeeded = true
+ return &r, nil
+}
+
+// Close removes resources associated with an initialized Reader, if any.
+func (r *Reader) Close() error {
+ path := r.path
+ r.path = "" // Mark the archive as closed
+ if r.removeOnClose {
+ return os.Remove(path)
+ }
+ return nil
+}
+
+// ChooseManifestItem selects a manifest item from r.Manifest matching (ref, sourceIndex), one or
+// both of which should be (nil, -1).
+// On success, it returns the manifest item and an index of the matching tag, if a tag was used
+// for matching; the index is -1 if a tag was not used.
+func (r *Reader) ChooseManifestItem(ref reference.NamedTagged, sourceIndex int) (*ManifestItem, int, error) {
+ switch {
+ case ref != nil && sourceIndex != -1:
+ return nil, -1, errors.Errorf("Internal error: Cannot have both ref %s and source index @%d",
+ ref.String(), sourceIndex)
+
+ case ref != nil:
+ refString := ref.String()
+ for i := range r.Manifest {
+ for tagIndex, tag := range r.Manifest[i].RepoTags {
+ parsedTag, err := reference.ParseNormalizedNamed(tag)
+ if err != nil {
+ return nil, -1, errors.Wrapf(err, "Invalid tag %#v in manifest.json item @%d", tag, i)
+ }
+ if parsedTag.String() == refString {
+ return &r.Manifest[i], tagIndex, nil
+ }
+ }
+ }
+ return nil, -1, errors.Errorf("Tag %#v not found", refString)
+
+ case sourceIndex != -1:
+ if sourceIndex >= len(r.Manifest) {
+ return nil, -1, errors.Errorf("Invalid source index @%d, only %d manifest items available",
+ sourceIndex, len(r.Manifest))
+ }
+ return &r.Manifest[sourceIndex], -1, nil
+
+ default:
+ if len(r.Manifest) != 1 {
+ return nil, -1, errors.Errorf("Unexpected tar manifest.json: expected 1 item, got %d", len(r.Manifest))
+ }
+ return &r.Manifest[0], -1, nil
+ }
+}
+
+// tarReadCloser is a way to close the backing file of a tar.Reader when the user no longer needs the tar component.
+type tarReadCloser struct {
+ *tar.Reader
+ backingFile *os.File
+}
+
+func (t *tarReadCloser) Close() error {
+ return t.backingFile.Close()
+}
+
+// openTarComponent returns a ReadCloser for the specific file within the archive.
+// This is a linear scan; we assume that the tar file will have a fairly small number of files (~layers),
+// and that filesystem caching will make the repeated seeking over the (uncompressed) tarPath cheap enough.
+// It is safe to call this method from multiple goroutines simultaneously.
+// The caller should call .Close() on the returned stream.
+func (r *Reader) openTarComponent(componentPath string) (io.ReadCloser, error) {
+ // This is only a sanity check; if anyone did concurrently close r, this access is technically
+ // racy against the write in .Close().
+ if r.path == "" {
+ return nil, errors.New("Internal error: trying to read an already closed tarfile.Reader")
+ }
+
+ f, err := os.Open(r.path)
+ if err != nil {
+ return nil, err
+ }
+ succeeded := false
+ defer func() {
+ if !succeeded {
+ f.Close()
+ }
+ }()
+
+ tarReader, header, err := findTarComponent(f, componentPath)
+ if err != nil {
+ return nil, err
+ }
+ if header == nil {
+ return nil, os.ErrNotExist
+ }
+ if header.FileInfo().Mode()&os.ModeType == os.ModeSymlink { // FIXME: untested
+ // We follow only one symlink; so no loops are possible.
+ if _, err := f.Seek(0, io.SeekStart); err != nil {
+ return nil, err
+ }
+ // The new path could easily point "outside" the archive, but we only compare it to existing tar headers without extracting the archive,
+ // so we don't care.
+ tarReader, header, err = findTarComponent(f, path.Join(path.Dir(componentPath), header.Linkname))
+ if err != nil {
+ return nil, err
+ }
+ if header == nil {
+ return nil, os.ErrNotExist
+ }
+ }
+
+ if !header.FileInfo().Mode().IsRegular() {
+ return nil, errors.Errorf("Error reading tar archive component %s: not a regular file", header.Name)
+ }
+ succeeded = true
+ return &tarReadCloser{Reader: tarReader, backingFile: f}, nil
+}
+
+// findTarComponent returns a header and a reader matching componentPath within inputFile,
+// or (nil, nil, nil) if not found.
+func findTarComponent(inputFile io.Reader, componentPath string) (*tar.Reader, *tar.Header, error) {
+ t := tar.NewReader(inputFile)
+ componentPath = path.Clean(componentPath)
+ for {
+ h, err := t.Next()
+ if err == io.EOF {
+ break
+ }
+ if err != nil {
+ return nil, nil, err
+ }
+ if path.Clean(h.Name) == componentPath {
+ return t, h, nil
+ }
+ }
+ return nil, nil, nil
+}
+
+// readTarComponent returns full contents of componentPath.
+// It is safe to call this method from multiple goroutines simultaneously.
+func (r *Reader) readTarComponent(path string, limit int) ([]byte, error) {
+ file, err := r.openTarComponent(path)
+ if err != nil {
+ return nil, errors.Wrapf(err, "Error loading tar component %s", path)
+ }
+ defer file.Close()
+ bytes, err := iolimits.ReadAtMost(file, limit)
+ if err != nil {
+ return nil, err
+ }
+ return bytes, nil
+}
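
ChooseManifestItem's selection rules, restated as a standalone sketch (the real method lives in an internal package, so external callers reach it only through docker/archive): a tag and a source index are mutually exclusive, and with neither, the archive must contain exactly one image:

    package main

    import (
        "errors"
        "fmt"
    )

    type item struct{ RepoTags []string }

    func choose(man []item, tag string, index int) (int, error) {
        switch {
        case tag != "" && index != -1:
            return -1, errors.New("cannot use both a tag and a source index")
        case tag != "":
            for i := range man {
                for _, t := range man[i].RepoTags {
                    if t == tag {
                        return i, nil
                    }
                }
            }
            return -1, fmt.Errorf("tag %q not found", tag)
        case index != -1:
            if index >= len(man) {
                return -1, fmt.Errorf("invalid source index @%d, only %d items", index, len(man))
            }
            return index, nil
        default:
            if len(man) != 1 {
                return -1, fmt.Errorf("expected 1 manifest item, got %d", len(man))
            }
            return 0, nil
        }
    }

    func main() {
        man := []item{{RepoTags: []string{"busybox:latest"}}, {}}
        fmt.Println(choose(man, "busybox:latest", -1)) // 0 <nil>
        fmt.Println(choose(man, "", 1))                // 1 <nil>
        fmt.Println(choose(man, "", -1))               // error: two items, no selector
    }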
diff --git a/vendor/github.com/containers/image/v5/docker/internal/tarfile/src.go b/vendor/github.com/containers/image/v5/docker/internal/tarfile/src.go
new file mode 100644
index 000000000..0db9a72b5
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/docker/internal/tarfile/src.go
@@ -0,0 +1,331 @@
+package tarfile
+
+import (
+ "archive/tar"
+ "bytes"
+ "context"
+ "encoding/json"
+ "io"
+ "io/ioutil"
+ "os"
+ "path"
+ "sync"
+
+ "github.com/containers/image/v5/docker/reference"
+ "github.com/containers/image/v5/internal/iolimits"
+ "github.com/containers/image/v5/manifest"
+ "github.com/containers/image/v5/pkg/compression"
+ "github.com/containers/image/v5/types"
+ digest "github.com/opencontainers/go-digest"
+ "github.com/pkg/errors"
+)
+
+// Source is a partial implementation of types.ImageSource for reading from tarPath.
+type Source struct {
+ archive *Reader
+ closeArchive bool // .Close() the archive when the source is closed.
+ // If ref is nil and sourceIndex is -1, the Source refers to the only image in the archive.
+ ref reference.NamedTagged // May be nil
+ sourceIndex int // May be -1
+ // The following data is only available after ensureCachedDataIsPresent() succeeds
+ tarManifest *ManifestItem // nil if not available yet.
+ configBytes []byte
+ configDigest digest.Digest
+ orderedDiffIDList []digest.Digest
+ knownLayers map[digest.Digest]*layerInfo
+ // Other state
+ generatedManifest []byte // Private cache for GetManifest(), nil if not set yet.
+ cacheDataLock sync.Once // Private state for ensureCachedDataIsPresent to make it concurrency-safe
+ cacheDataResult error // Private state for ensureCachedDataIsPresent
+}
+
+type layerInfo struct {
+ path string
+ size int64
+}
+
+// NewSource returns a tarfile.Source for an image in the specified archive matching ref
+// and sourceIndex (or the only image if they are (nil, -1)).
+// The archive will be closed if closeArchive is true.
+func NewSource(archive *Reader, closeArchive bool, ref reference.NamedTagged, sourceIndex int) *Source {
+ return &Source{
+ archive: archive,
+ closeArchive: closeArchive,
+ ref: ref,
+ sourceIndex: sourceIndex,
+ }
+}
+
+// ensureCachedDataIsPresent loads data necessary for any of the public accessors.
+// It is safe to call this from multi-threaded code.
+func (s *Source) ensureCachedDataIsPresent() error {
+ s.cacheDataLock.Do(func() {
+ s.cacheDataResult = s.ensureCachedDataIsPresentPrivate()
+ })
+ return s.cacheDataResult
+}
+
+// ensureCachedDataIsPresentPrivate is a private implementation detail of ensureCachedDataIsPresent.
+// Call ensureCachedDataIsPresent instead.
+func (s *Source) ensureCachedDataIsPresentPrivate() error {
+ tarManifest, _, err := s.archive.ChooseManifestItem(s.ref, s.sourceIndex)
+ if err != nil {
+ return err
+ }
+
+ // Read and parse config.
+ configBytes, err := s.archive.readTarComponent(tarManifest.Config, iolimits.MaxConfigBodySize)
+ if err != nil {
+ return err
+ }
+ var parsedConfig manifest.Schema2Image // There's a lot of info there, but we only really care about layer DiffIDs.
+ if err := json.Unmarshal(configBytes, &parsedConfig); err != nil {
+ return errors.Wrapf(err, "Error decoding tar config %s", tarManifest.Config)
+ }
+ if parsedConfig.RootFS == nil {
+ return errors.Errorf("Invalid image config (rootFS is not set): %s", tarManifest.Config)
+ }
+
+ knownLayers, err := s.prepareLayerData(tarManifest, &parsedConfig)
+ if err != nil {
+ return err
+ }
+
+ // Success; commit.
+ s.tarManifest = tarManifest
+ s.configBytes = configBytes
+ s.configDigest = digest.FromBytes(configBytes)
+ s.orderedDiffIDList = parsedConfig.RootFS.DiffIDs
+ s.knownLayers = knownLayers
+ return nil
+}
+
+// Close removes resources associated with an initialized Source, if any.
+func (s *Source) Close() error {
+ if s.closeArchive {
+ return s.archive.Close()
+ }
+ return nil
+}
+
+// TarManifest returns contents of manifest.json
+func (s *Source) TarManifest() []ManifestItem {
+ return s.archive.Manifest
+}
+
+func (s *Source) prepareLayerData(tarManifest *ManifestItem, parsedConfig *manifest.Schema2Image) (map[digest.Digest]*layerInfo, error) {
+ // Collect layer data available in manifest and config.
+ if len(tarManifest.Layers) != len(parsedConfig.RootFS.DiffIDs) {
+ return nil, errors.Errorf("Inconsistent layer count: %d in manifest, %d in config", len(tarManifest.Layers), len(parsedConfig.RootFS.DiffIDs))
+ }
+ knownLayers := map[digest.Digest]*layerInfo{}
+ unknownLayerSizes := map[string]*layerInfo{} // Points into knownLayers, a "to do list" of items with unknown sizes.
+ for i, diffID := range parsedConfig.RootFS.DiffIDs {
+ if _, ok := knownLayers[diffID]; ok {
+ // Apparently it really can happen that a single image contains the same layer diff more than once.
+ // In that case, the diffID validation ensures that both layers truly are the same, and it should not matter
+ // which of the tarManifest.Layers paths is used; (docker save) actually makes the duplicates symlinks to the original.
+ continue
+ }
+ layerPath := path.Clean(tarManifest.Layers[i])
+ if _, ok := unknownLayerSizes[layerPath]; ok {
+ return nil, errors.Errorf("Layer tarfile %s used for two different DiffID values", layerPath)
+ }
+ li := &layerInfo{ // A new element in each iteration
+ path: layerPath,
+ size: -1,
+ }
+ knownLayers[diffID] = li
+ unknownLayerSizes[layerPath] = li
+ }
+
+ // Scan the tar file to collect layer sizes.
+ file, err := os.Open(s.archive.path)
+ if err != nil {
+ return nil, err
+ }
+ defer file.Close()
+ t := tar.NewReader(file)
+ for {
+ h, err := t.Next()
+ if err == io.EOF {
+ break
+ }
+ if err != nil {
+ return nil, err
+ }
+ layerPath := path.Clean(h.Name)
+ // FIXME: Cache this data across images in Reader.
+ if li, ok := unknownLayerSizes[layerPath]; ok {
+ // Since GetBlob will decompress layers that are compressed we need
+ // to do the decompression here as well, otherwise we will
+ // incorrectly report the size. Pretty critical, since tools like
+ // umoci always compress layer blobs. Obviously we only bother with
+ // the slower method of checking if it's compressed.
+ uncompressedStream, isCompressed, err := compression.AutoDecompress(t)
+ if err != nil {
+ return nil, errors.Wrapf(err, "Error auto-decompressing %s to determine its size", layerPath)
+ }
+ defer uncompressedStream.Close()
+
+ uncompressedSize := h.Size
+ if isCompressed {
+ uncompressedSize, err = io.Copy(ioutil.Discard, uncompressedStream)
+ if err != nil {
+ return nil, errors.Wrapf(err, "Error reading %s to find its size", layerPath)
+ }
+ }
+ li.size = uncompressedSize
+ delete(unknownLayerSizes, layerPath)
+ }
+ }
+ if len(unknownLayerSizes) != 0 {
+ return nil, errors.Errorf("Some layer tarfiles are missing in the tarball") // This could do with better error reporting, if it ever happened in practice.
+ }
+
+ return knownLayers, nil
+}
+
+// GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available).
+// It may use a remote (= slow) service.
+// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve (when the primary manifest is a manifest list);
+// this never happens if the primary manifest is not a manifest list (e.g. if the source never returns manifest lists).
+// This source implementation does not support manifest lists, so the passed-in instanceDigest should always be nil,
+// as the primary manifest can not be a list, so there can be no secondary instances.
+func (s *Source) GetManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, string, error) {
+ if instanceDigest != nil {
+ // How did we even get here? GetManifest(ctx, nil) has returned a manifest.DockerV2Schema2MediaType.
+ return nil, "", errors.New(`Manifest lists are not supported by "docker-daemon:"`)
+ }
+ if s.generatedManifest == nil {
+ if err := s.ensureCachedDataIsPresent(); err != nil {
+ return nil, "", err
+ }
+ m := manifest.Schema2{
+ SchemaVersion: 2,
+ MediaType: manifest.DockerV2Schema2MediaType,
+ ConfigDescriptor: manifest.Schema2Descriptor{
+ MediaType: manifest.DockerV2Schema2ConfigMediaType,
+ Size: int64(len(s.configBytes)),
+ Digest: s.configDigest,
+ },
+ LayersDescriptors: []manifest.Schema2Descriptor{},
+ }
+ for _, diffID := range s.orderedDiffIDList {
+ li, ok := s.knownLayers[diffID]
+ if !ok {
+ return nil, "", errors.Errorf("Internal inconsistency: Information about layer %s missing", diffID)
+ }
+ m.LayersDescriptors = append(m.LayersDescriptors, manifest.Schema2Descriptor{
+ Digest: diffID, // diffID is a digest of the uncompressed tarball
+ MediaType: manifest.DockerV2Schema2LayerMediaType,
+ Size: li.size,
+ })
+ }
+ manifestBytes, err := json.Marshal(&m)
+ if err != nil {
+ return nil, "", err
+ }
+ s.generatedManifest = manifestBytes
+ }
+ return s.generatedManifest, manifest.DockerV2Schema2MediaType, nil
+}
+
+// uncompressedReadCloser is an io.ReadCloser that closes both the uncompressed stream and the underlying input.
+type uncompressedReadCloser struct {
+ io.Reader
+ underlyingCloser func() error
+ uncompressedCloser func() error
+}
+
+func (r uncompressedReadCloser) Close() error {
+ var res error
+ if err := r.uncompressedCloser(); err != nil {
+ res = err
+ }
+ if err := r.underlyingCloser(); err != nil && res == nil {
+ res = err
+ }
+ return res
+}
+
+// HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently.
+func (s *Source) HasThreadSafeGetBlob() bool {
+ return true
+}
+
+// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown).
+// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
+// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location.
+func (s *Source) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) {
+ if err := s.ensureCachedDataIsPresent(); err != nil {
+ return nil, 0, err
+ }
+
+ if info.Digest == s.configDigest { // FIXME? Implement a more general algorithm matching instead of assuming sha256.
+ return ioutil.NopCloser(bytes.NewReader(s.configBytes)), int64(len(s.configBytes)), nil
+ }
+
+ if li, ok := s.knownLayers[info.Digest]; ok { // diffID is a digest of the uncompressed tarball,
+ underlyingStream, err := s.archive.openTarComponent(li.path)
+ if err != nil {
+ return nil, 0, err
+ }
+ closeUnderlyingStream := true
+ defer func() {
+ if closeUnderlyingStream {
+ underlyingStream.Close()
+ }
+ }()
+
+ // In order to handle the fact that digests != diffIDs (and thus that a
+ // caller which is trying to verify the blob will run into problems),
+ // we need to decompress blobs. This is a bit ugly, but it's a
+ // consequence of making everything addressable by their DiffID rather
+ // than by their digest...
+ //
+ // In particular, because the v2s2 manifest being generated uses
+ // DiffIDs, any caller of GetBlob is going to be asking for DiffIDs of
+ // layers not their _actual_ digest. The result is that copy/... will
+// be verifying a "digest" which is not the actual layer's digest (but
+ // is instead the DiffID).
+
+ uncompressedStream, _, err := compression.AutoDecompress(underlyingStream)
+ if err != nil {
+ return nil, 0, errors.Wrapf(err, "Error auto-decompressing blob %s", info.Digest)
+ }
+
+ newStream := uncompressedReadCloser{
+ Reader: uncompressedStream,
+ underlyingCloser: underlyingStream.Close,
+ uncompressedCloser: uncompressedStream.Close,
+ }
+ closeUnderlyingStream = false
+
+ return newStream, li.size, nil
+ }
+
+ return nil, 0, errors.Errorf("Unknown blob %s", info.Digest)
+}
+
+// GetSignatures returns the image's signatures. It may use a remote (= slow) service.
+// This source implementation does not support manifest lists, so the passed-in instanceDigest should always be nil,
+// as there can be no secondary manifests.
+func (s *Source) GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) {
+ if instanceDigest != nil {
+ // How did we even get here? GetManifest(ctx, nil) has returned a manifest.DockerV2Schema2MediaType.
+ return nil, errors.Errorf(`Manifest lists are not supported by "docker-daemon:"`)
+ }
+ return [][]byte{}, nil
+}
+
+// LayerInfosForCopy returns either nil (meaning the values in the manifest are fine), or updated values for the layer
+// blobsums that are listed in the image's manifest. If values are returned, they should be used when using GetBlob()
+// to read the image's layers.
+// This source implementation does not support manifest lists, so the passed-in instanceDigest should always be nil,
+// as the primary manifest can not be a list, so there can be no secondary manifests.
+// The Digest field is guaranteed to be provided; Size may be -1.
+// WARNING: The list may contain duplicates, and they are semantically relevant.
+func (s *Source) LayerInfosForCopy(context.Context, *digest.Digest) ([]types.BlobInfo, error) {
+ return nil, nil
+}
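
Why GetBlob decompresses before returning: the generated schema2 manifest lists DiffIDs, i.e. digests of the uncompressed layer tars, so the bytes handed to the caller must hash to that value even when the archive stores the layer compressed. A small demonstration, with gzip standing in for any compression and made-up layer contents:

    package main

    import (
        "bytes"
        "compress/gzip"
        "fmt"
        "io"

        "github.com/opencontainers/go-digest"
    )

    func main() {
        plain := []byte("fake layer tar contents")

        var compressed bytes.Buffer
        zw := gzip.NewWriter(&compressed)
        if _, err := zw.Write(plain); err != nil {
            panic(err)
        }
        zw.Close()

        zr, err := gzip.NewReader(bytes.NewReader(compressed.Bytes()))
        if err != nil {
            panic(err)
        }
        uncompressed, err := io.ReadAll(zr)
        if err != nil {
            panic(err)
        }

        fmt.Println("compressed digest:     ", digest.FromBytes(compressed.Bytes()))
        fmt.Println("DiffID (uncompressed): ", digest.FromBytes(uncompressed)) // what the generated manifest promises
    }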
diff --git a/vendor/github.com/containers/image/v5/docker/internal/tarfile/types.go b/vendor/github.com/containers/image/v5/docker/internal/tarfile/types.go
new file mode 100644
index 000000000..6e6ccd2d8
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/docker/internal/tarfile/types.go
@@ -0,0 +1,28 @@
+package tarfile
+
+import (
+ "github.com/containers/image/v5/manifest"
+ "github.com/opencontainers/go-digest"
+)
+
+// Various data structures.
+
+// Based on github.com/docker/docker/image/tarexport/tarexport.go
+const (
+ manifestFileName = "manifest.json"
+ legacyLayerFileName = "layer.tar"
+ legacyConfigFileName = "json"
+ legacyVersionFileName = "VERSION"
+ legacyRepositoriesFileName = "repositories"
+)
+
+// ManifestItem is an element of the array stored in the top-level manifest.json file.
+type ManifestItem struct { // NOTE: This is visible as docker/tarfile.ManifestItem, and a part of the stable API.
+ Config string
+ RepoTags []string
+ Layers []string
+ Parent imageID `json:",omitempty"`
+ LayerSources map[digest.Digest]manifest.Schema2Descriptor `json:",omitempty"`
+}
+
+type imageID string
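
For orientation, manifest.json is a JSON array of such entries. A self-contained sketch that mirrors the exported fields above; the digest-derived file names are hypothetical placeholders, not real digests:

package main

import (
	"encoding/json"
	"fmt"
)

// item mirrors ManifestItem's exported fields; Parent is omitted when empty,
// matching the `json:",omitempty"` tag above.
type item struct {
	Config   string
	RepoTags []string
	Layers   []string
	Parent   string `json:",omitempty"`
}

func main() {
	entries := []item{{
		Config:   "<config-digest-hex>.json",
		RepoTags: []string{"docker.io/library/busybox:latest"},
		Layers:   []string{"<layer-digest-hex>.tar"},
	}}
	b, err := json.MarshalIndent(entries, "", "  ")
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b))
}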
diff --git a/vendor/github.com/containers/image/v5/docker/internal/tarfile/writer.go b/vendor/github.com/containers/image/v5/docker/internal/tarfile/writer.go
new file mode 100644
index 000000000..fd2c461d0
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/docker/internal/tarfile/writer.go
@@ -0,0 +1,381 @@
+package tarfile
+
+import (
+ "archive/tar"
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "io"
+ "os"
+ "path/filepath"
+ "sync"
+ "time"
+
+ "github.com/containers/image/v5/docker/reference"
+ "github.com/containers/image/v5/manifest"
+ "github.com/containers/image/v5/types"
+ "github.com/opencontainers/go-digest"
+ "github.com/pkg/errors"
+ "github.com/sirupsen/logrus"
+)
+
+// Writer allows creating a (docker save)-formatted tar archive containing one or more images.
+type Writer struct {
+ mutex sync.Mutex
+ // ALL of the following members can only be accessed with the mutex held.
+ // Use Writer.lock() to obtain the mutex.
+ writer io.Writer
+ tar *tar.Writer // nil if the Writer has already been closed.
+ // Other state.
+ blobs map[digest.Digest]types.BlobInfo // list of already-sent blobs
+ repositories map[string]map[string]string
+ legacyLayers map[string]struct{} // A set of IDs of legacy layers that have been already sent.
+ manifest []ManifestItem
+ manifestByConfig map[digest.Digest]int // A map from config digest to an entry index in manifest above.
+}
+
+// NewWriter returns a Writer for the specified io.Writer.
+// The caller must eventually call .Close() on the returned object to create a valid archive.
+func NewWriter(dest io.Writer) *Writer {
+ return &Writer{
+ writer: dest,
+ tar: tar.NewWriter(dest),
+ blobs: make(map[digest.Digest]types.BlobInfo),
+ repositories: map[string]map[string]string{},
+ legacyLayers: map[string]struct{}{},
+ manifestByConfig: map[digest.Digest]int{},
+ }
+}
+
+// lock does some sanity checks and locks the Writer.
+// If this function succeeds, the caller must call w.unlock.
+// Do not use Writer.mutex directly.
+func (w *Writer) lock() error {
+ w.mutex.Lock()
+ if w.tar == nil {
+ w.mutex.Unlock()
+ return errors.New("Internal error: trying to use an already closed tarfile.Writer")
+ }
+ return nil
+}
+
+// unlock releases the lock obtained by Writer.lock
+// Do not use Writer.mutex directly.
+func (w *Writer) unlock() {
+ w.mutex.Unlock()
+}
+
+// tryReusingBlobLocked checks whether the transport already contains a blob, and if so, returns its metadata.
+// info.Digest must not be empty.
+// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size.
+// If the transport can not reuse the requested blob, tryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
+// The caller must have locked the Writer.
+func (w *Writer) tryReusingBlobLocked(info types.BlobInfo) (bool, types.BlobInfo, error) {
+ if info.Digest == "" {
+ return false, types.BlobInfo{}, errors.Errorf("Can not check for a blob with unknown digest")
+ }
+ if blob, ok := w.blobs[info.Digest]; ok {
+ return true, types.BlobInfo{Digest: info.Digest, Size: blob.Size}, nil
+ }
+ return false, types.BlobInfo{}, nil
+}
+
+// recordBlobLocked records metadata of a blob, which must contain at least a digest and size.
+// The caller must have locked the Writer.
+func (w *Writer) recordBlobLocked(info types.BlobInfo) {
+ w.blobs[info.Digest] = info
+}
+
+// ensureSingleLegacyLayerLocked writes legacy VERSION and configuration files for a single layer.
+// The caller must have locked the Writer.
+func (w *Writer) ensureSingleLegacyLayerLocked(layerID string, layerDigest digest.Digest, configBytes []byte) error {
+ if _, ok := w.legacyLayers[layerID]; !ok {
+ // Create a symlink for the legacy format, where there is one subdirectory per layer ("image").
+ // See also the comment in physicalLayerPath.
+ physicalLayerPath := w.physicalLayerPath(layerDigest)
+ if err := w.sendSymlinkLocked(filepath.Join(layerID, legacyLayerFileName), filepath.Join("..", physicalLayerPath)); err != nil {
+ return errors.Wrap(err, "Error creating layer symbolic link")
+ }
+
+ b := []byte("1.0")
+ if err := w.sendBytesLocked(filepath.Join(layerID, legacyVersionFileName), b); err != nil {
+ return errors.Wrap(err, "Error writing VERSION file")
+ }
+
+ if err := w.sendBytesLocked(filepath.Join(layerID, legacyConfigFileName), configBytes); err != nil {
+ return errors.Wrap(err, "Error writing config json file")
+ }
+
+ w.legacyLayers[layerID] = struct{}{}
+ }
+ return nil
+}
+
+// writeLegacyMetadataLocked writes legacy layer metadata and records tags for a single image.
+// The caller must have locked the Writer.
+func (w *Writer) writeLegacyMetadataLocked(layerDescriptors []manifest.Schema2Descriptor, configBytes []byte, repoTags []reference.NamedTagged) error {
+ var chainID digest.Digest
+ lastLayerID := ""
+ for i, l := range layerDescriptors {
+ // The legacy format requires a config file per layer
+ layerConfig := make(map[string]interface{})
+
+ // The root layer doesn't have any parent
+ if lastLayerID != "" {
+ layerConfig["parent"] = lastLayerID
+ }
+ // The top layer configuration file is generated from a subset of the image configuration
+ if i == len(layerDescriptors)-1 {
+ var config map[string]*json.RawMessage
+ err := json.Unmarshal(configBytes, &config)
+ if err != nil {
+ return errors.Wrap(err, "Error unmarshaling config")
+ }
+ for _, attr := range [7]string{"architecture", "config", "container", "container_config", "created", "docker_version", "os"} {
+ layerConfig[attr] = config[attr]
+ }
+ }
+
+ // This chainID value matches the computation in docker/docker/layer.CreateChainID …
+ if chainID == "" {
+ chainID = l.Digest
+ } else {
+ chainID = digest.Canonical.FromString(chainID.String() + " " + l.Digest.String())
+ }
+ // … but note that the image ID does not _exactly_ match docker/docker/image/v1.CreateID, primarily because
+ // we create the image configs differently in some details. At least recent versions allocate new IDs on load,
+ // so this is fine as long as the IDs we use are unique / cannot loop.
+ //
+ // For intermediate images, we could just use the chainID as an image ID, but using a digest of ~the created
+ // config makes sure that everything uses the same “namespace”; a bit less efficient but clearer.
+ //
+ // Temporarily add the chainID to the config, only for the purpose of generating the image ID.
+ layerConfig["layer_id"] = chainID
+ b, err := json.Marshal(layerConfig) // Note that layerConfig["id"] is not set yet at this point.
+ if err != nil {
+ return errors.Wrap(err, "Error marshaling layer config")
+ }
+ delete(layerConfig, "layer_id")
+ layerID := digest.Canonical.FromBytes(b).Hex()
+ layerConfig["id"] = layerID
+
+ configBytes, err := json.Marshal(layerConfig)
+ if err != nil {
+ return errors.Wrap(err, "Error marshaling layer config")
+ }
+
+ if err := w.ensureSingleLegacyLayerLocked(layerID, l.Digest, configBytes); err != nil {
+ return err
+ }
+
+ lastLayerID = layerID
+ }
+
+ if lastLayerID != "" {
+ for _, repoTag := range repoTags {
+ if val, ok := w.repositories[repoTag.Name()]; ok {
+ val[repoTag.Tag()] = lastLayerID
+ } else {
+ w.repositories[repoTag.Name()] = map[string]string{repoTag.Tag(): lastLayerID}
+ }
+ }
+ }
+ return nil
+}
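
The chain-ID computation above is easiest to verify with a worked example. A self-contained sketch following docker/docker/layer.CreateChainID: each step hashes the previous chain ID and the next layer digest joined by a single space (the layer digests here are hypothetical stand-ins):

package main

import (
	"fmt"

	"github.com/opencontainers/go-digest"
)

func main() {
	layers := []digest.Digest{
		digest.FromString("layer-1"),
		digest.FromString("layer-2"),
	}
	var chainID digest.Digest
	for _, l := range layers {
		if chainID == "" {
			chainID = l // the first chain ID is the first layer digest itself
		} else {
			chainID = digest.Canonical.FromString(chainID.String() + " " + l.String())
		}
		fmt.Println(chainID)
	}
}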
+
+// checkManifestItemsMatch checks that a and b describe the same image,
+// and returns an error if that’s not the case (which should never happen).
+func checkManifestItemsMatch(a, b *ManifestItem) error {
+ if a.Config != b.Config {
+ return fmt.Errorf("Internal error: Trying to reuse ManifestItem values with configs %#v vs. %#v", a.Config, b.Config)
+ }
+ if len(a.Layers) != len(b.Layers) {
+ return fmt.Errorf("Internal error: Trying to reuse ManifestItem values with layers %#v vs. %#v", a.Layers, b.Layers)
+ }
+ for i := range a.Layers {
+ if a.Layers[i] != b.Layers[i] {
+ return fmt.Errorf("Internal error: Trying to reuse ManifestItem values with layers[i] %#v vs. %#v", a.Layers[i], b.Layers[i])
+ }
+ }
+ // Ignore RepoTags, that will be built later.
+ // Ignore Parent and LayerSources, which we don’t set to anything meaningful.
+ return nil
+}
+
+// ensureManifestItemLocked ensures that there is a manifest item pointing to (layerDescriptors, configDigest) with repoTags
+// The caller must have locked the Writer.
+func (w *Writer) ensureManifestItemLocked(layerDescriptors []manifest.Schema2Descriptor, configDigest digest.Digest, repoTags []reference.NamedTagged) error {
+ layerPaths := []string{}
+ for _, l := range layerDescriptors {
+ layerPaths = append(layerPaths, w.physicalLayerPath(l.Digest))
+ }
+
+ var item *ManifestItem
+ newItem := ManifestItem{
+ Config: w.configPath(configDigest),
+ RepoTags: []string{},
+ Layers: layerPaths,
+ Parent: "", // We don’t have this information
+ LayerSources: nil,
+ }
+ if i, ok := w.manifestByConfig[configDigest]; ok {
+ item = &w.manifest[i]
+ if err := checkManifestItemsMatch(item, &newItem); err != nil {
+ return err
+ }
+ } else {
+ i := len(w.manifest)
+ w.manifestByConfig[configDigest] = i
+ w.manifest = append(w.manifest, newItem)
+ item = &w.manifest[i]
+ }
+
+ knownRepoTags := map[string]struct{}{}
+ for _, repoTag := range item.RepoTags {
+ knownRepoTags[repoTag] = struct{}{}
+ }
+ for _, tag := range repoTags {
+ // For github.com/docker/docker consumers, this works just as well as
+ // refString := ref.String()
+ // because when reading the RepoTags strings, github.com/docker/docker/reference
+ // normalizes both of them to the same value.
+ //
+ // Doing it this way to include the normalized-out `docker.io[/library]` does make
+ // a difference for github.com/projectatomic/docker consumers, with the
+ // “Add --add-registry and --block-registry options to docker daemon” patch.
+ // These consumers treat reference strings which include a hostname and reference
+ // strings without a hostname differently.
+ //
+ // Using the host name here is more explicit about the intent, and it has the same
+ // effect as (docker pull) in projectatomic/docker, which tags the result using
+ // a hostname-qualified reference.
+ // See https://github.com/containers/image/issues/72 for a more detailed
+ // analysis and explanation.
+ refString := fmt.Sprintf("%s:%s", tag.Name(), tag.Tag())
+
+ if _, ok := knownRepoTags[refString]; !ok {
+ item.RepoTags = append(item.RepoTags, refString)
+ knownRepoTags[refString] = struct{}{}
+ }
+ }
+
+ return nil
+}
+
+// Close writes all outstanding data about images to the archive, and finishes writing data
+// to the underlying io.Writer.
+// No more images can be added after this is called.
+func (w *Writer) Close() error {
+ if err := w.lock(); err != nil {
+ return err
+ }
+ defer w.unlock()
+
+ b, err := json.Marshal(&w.manifest)
+ if err != nil {
+ return err
+ }
+ if err := w.sendBytesLocked(manifestFileName, b); err != nil {
+ return err
+ }
+
+ b, err = json.Marshal(w.repositories)
+ if err != nil {
+ return errors.Wrap(err, "Error marshaling repositories")
+ }
+ if err := w.sendBytesLocked(legacyRepositoriesFileName, b); err != nil {
+ return errors.Wrap(err, "Error writing config json file")
+ }
+
+ if err := w.tar.Close(); err != nil {
+ return err
+ }
+ w.tar = nil // Mark the Writer as closed.
+ return nil
+}
+
+// configPath returns a path we choose for storing a config with the specified digest.
+// NOTE: This is an internal implementation detail, not a format property, and can change
+// any time.
+func (w *Writer) configPath(configDigest digest.Digest) string {
+ return configDigest.Hex() + ".json"
+}
+
+// physicalLayerPath returns a path we choose for storing a layer with the specified digest
+// (the actual path, i.e. a regular file, not a symlink that may be used in the legacy format).
+// NOTE: This is an internal implementation detail, not a format property, and can change
+// any time.
+func (w *Writer) physicalLayerPath(layerDigest digest.Digest) string {
+ // Note that this can't be e.g. filepath.Join(l.Digest.Hex(), legacyLayerFileName); due to the way
+ // writeLegacyMetadata constructs layer IDs differently from inputinfo.Digest values (as described
+ // inside it), most of the layers would end up in subdirectories alone without any metadata; (docker load)
+ // tries to load every subdirectory as an image and fails if the config is missing. So, keep the layers
+ // in the root of the tarball.
+ return layerDigest.Hex() + ".tar"
+}
+
+type tarFI struct {
+ path string
+ size int64
+ isSymlink bool
+}
+
+func (t *tarFI) Name() string {
+ return t.path
+}
+func (t *tarFI) Size() int64 {
+ return t.size
+}
+func (t *tarFI) Mode() os.FileMode {
+ if t.isSymlink {
+ return os.ModeSymlink
+ }
+ return 0444
+}
+func (t *tarFI) ModTime() time.Time {
+ return time.Unix(0, 0)
+}
+func (t *tarFI) IsDir() bool {
+ return false
+}
+func (t *tarFI) Sys() interface{} {
+ return nil
+}
+
+// sendSymlinkLocked sends a symlink into the tar stream.
+// The caller must have locked the Writer.
+func (w *Writer) sendSymlinkLocked(path string, target string) error {
+ hdr, err := tar.FileInfoHeader(&tarFI{path: path, size: 0, isSymlink: true}, target)
+ if err != nil {
+ return err
+ }
+ logrus.Debugf("Sending as tar link %s -> %s", path, target)
+ return w.tar.WriteHeader(hdr)
+}
+
+// sendBytesLocked writes b as a file at path in the tar stream.
+// The caller must have locked the Writer.
+func (w *Writer) sendBytesLocked(path string, b []byte) error {
+ return w.sendFileLocked(path, int64(len(b)), bytes.NewReader(b))
+}
+
+// sendFileLocked sends a file into the tar stream.
+// The caller must have locked the Writer.
+func (w *Writer) sendFileLocked(path string, expectedSize int64, stream io.Reader) error {
+ hdr, err := tar.FileInfoHeader(&tarFI{path: path, size: expectedSize}, "")
+ if err != nil {
+ return err
+ }
+ logrus.Debugf("Sending as tar file %s", path)
+ if err := w.tar.WriteHeader(hdr); err != nil {
+ return err
+ }
+ // TODO: This can take quite some time, and should ideally be cancellable using a context.Context.
+ size, err := io.Copy(w.tar, stream)
+ if err != nil {
+ return err
+ }
+ if size != expectedSize {
+ return errors.Errorf("Size mismatch when copying %s, expected %d, got %d", path, expectedSize, size)
+ }
+ return nil
+}
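
sendSymlinkLocked and sendFileLocked work because tar.FileInfoHeader accepts any os.FileInfo, which the synthetic tarFI type provides. A runnable stdlib sketch of the same trick; names, sizes, and the symlink target are hypothetical:

package main

import (
	"archive/tar"
	"bytes"
	"fmt"
	"io"
	"os"
	"time"
)

type fakeFI struct {
	name string
	size int64
	mode os.FileMode
}

func (f fakeFI) Name() string       { return f.name }
func (f fakeFI) Size() int64        { return f.size }
func (f fakeFI) Mode() os.FileMode  { return f.mode }
func (f fakeFI) ModTime() time.Time { return time.Unix(0, 0) }
func (f fakeFI) IsDir() bool        { return false }
func (f fakeFI) Sys() interface{}   { return nil }

func main() {
	var buf bytes.Buffer
	tw := tar.NewWriter(&buf)
	// A regular file entry, as sendFileLocked emits it.
	hdr, err := tar.FileInfoHeader(fakeFI{name: "layer.tar", size: 4, mode: 0444}, "")
	if err != nil {
		panic(err)
	}
	if err := tw.WriteHeader(hdr); err != nil {
		panic(err)
	}
	if _, err := io.Copy(tw, bytes.NewReader([]byte("data"))); err != nil {
		panic(err)
	}
	// A symlink entry, as sendSymlinkLocked emits it; the target is passed as
	// the second argument to FileInfoHeader.
	lhdr, err := tar.FileInfoHeader(fakeFI{name: "id/layer.tar", mode: os.ModeSymlink}, "../layer.tar")
	if err != nil {
		panic(err)
	}
	if err := tw.WriteHeader(lhdr); err != nil {
		panic(err)
	}
	if err := tw.Close(); err != nil {
		panic(err)
	}
	fmt.Println(buf.Len(), "bytes of tar written")
}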
diff --git a/vendor/github.com/containers/image/v5/docker/lookaside.go b/vendor/github.com/containers/image/v5/docker/lookaside.go
index 918c0f838..6931fd07b 100644
--- a/vendor/github.com/containers/image/v5/docker/lookaside.go
+++ b/vendor/github.com/containers/image/v5/docker/lookaside.go
@@ -11,6 +11,7 @@ import (
"github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/types"
+ "github.com/containers/storage/pkg/homedir"
"github.com/ghodss/yaml"
"github.com/opencontainers/go-digest"
"github.com/pkg/errors"
@@ -26,6 +27,9 @@ var systemRegistriesDirPath = builtinRegistriesDirPath
// DO NOT change this, instead see systemRegistriesDirPath above.
const builtinRegistriesDirPath = "/etc/containers/registries.d"
+// userRegistriesDir is the relative path to the per-user registries.d directory.
+var userRegistriesDir = filepath.FromSlash(".config/containers/registries.d")
+
// registryConfiguration is one of the files in registriesDirPath configuring lookaside locations, or the result of merging them all.
// NOTE: Keep this in sync with docs/registries.d.md!
type registryConfiguration struct {
@@ -75,14 +79,17 @@ func configuredSignatureStorageBase(sys *types.SystemContext, ref dockerReferenc
// registriesDirPath returns a path to registries.d
func registriesDirPath(sys *types.SystemContext) string {
- if sys != nil {
- if sys.RegistriesDirPath != "" {
- return sys.RegistriesDirPath
- }
- if sys.RootForImplicitAbsolutePaths != "" {
- return filepath.Join(sys.RootForImplicitAbsolutePaths, systemRegistriesDirPath)
- }
+ if sys != nil && sys.RegistriesDirPath != "" {
+ return sys.RegistriesDirPath
}
+ userRegistriesDirPath := filepath.Join(homedir.Get(), userRegistriesDir)
+ if _, err := os.Stat(userRegistriesDirPath); err == nil {
+ return userRegistriesDirPath
+ }
+ if sys != nil && sys.RootForImplicitAbsolutePaths != "" {
+ return filepath.Join(sys.RootForImplicitAbsolutePaths, systemRegistriesDirPath)
+ }
+
return systemRegistriesDirPath
}
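
The resulting lookup order is: an explicit SystemContext path, then an existing per-user directory under $HOME, then the root-prefixed or built-in system path. A stdlib-only sketch of the same precedence, with a stand-in struct for types.SystemContext:

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

type sysCtx struct {
	RegistriesDirPath            string
	RootForImplicitAbsolutePaths string
}

func registriesDir(sys *sysCtx, home string) string {
	if sys != nil && sys.RegistriesDirPath != "" {
		return sys.RegistriesDirPath // explicit path wins unconditionally
	}
	userDir := filepath.Join(home, ".config/containers/registries.d")
	if _, err := os.Stat(userDir); err == nil {
		return userDir // per-user dir is used only if it exists
	}
	if sys != nil && sys.RootForImplicitAbsolutePaths != "" {
		return filepath.Join(sys.RootForImplicitAbsolutePaths, "etc/containers/registries.d")
	}
	return "/etc/containers/registries.d"
}

func main() {
	home, _ := os.UserHomeDir()
	fmt.Println(registriesDir(nil, home))
}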
diff --git a/vendor/github.com/containers/image/v5/docker/tarfile/dest.go b/vendor/github.com/containers/image/v5/docker/tarfile/dest.go
index c171da505..af1690683 100644
--- a/vendor/github.com/containers/image/v5/docker/tarfile/dest.go
+++ b/vendor/github.com/containers/image/v5/docker/tarfile/dest.go
@@ -1,36 +1,19 @@
package tarfile
import (
- "archive/tar"
- "bytes"
"context"
- "encoding/json"
- "fmt"
"io"
- "io/ioutil"
- "os"
- "path/filepath"
- "time"
+ internal "github.com/containers/image/v5/docker/internal/tarfile"
"github.com/containers/image/v5/docker/reference"
- "github.com/containers/image/v5/internal/iolimits"
- "github.com/containers/image/v5/internal/tmpdir"
- "github.com/containers/image/v5/manifest"
"github.com/containers/image/v5/types"
"github.com/opencontainers/go-digest"
- "github.com/pkg/errors"
- "github.com/sirupsen/logrus"
)
// Destination is a partial implementation of types.ImageDestination for writing to an io.Writer.
type Destination struct {
- writer io.Writer
- tar *tar.Writer
- repoTags []reference.NamedTagged
- // Other state.
- blobs map[digest.Digest]types.BlobInfo // list of already-sent blobs
- config []byte
- sysCtx *types.SystemContext
+ internal *internal.Destination
+ archive *internal.Writer
}
// NewDestination returns a tarfile.Destination for the specified io.Writer.
@@ -41,59 +24,51 @@ func NewDestination(dest io.Writer, ref reference.NamedTagged) *Destination {
// NewDestinationWithContext returns a tarfile.Destination for the specified io.Writer.
func NewDestinationWithContext(sys *types.SystemContext, dest io.Writer, ref reference.NamedTagged) *Destination {
- repoTags := []reference.NamedTagged{}
- if ref != nil {
- repoTags = append(repoTags, ref)
- }
+ archive := internal.NewWriter(dest)
return &Destination{
- writer: dest,
- tar: tar.NewWriter(dest),
- repoTags: repoTags,
- blobs: make(map[digest.Digest]types.BlobInfo),
- sysCtx: sys,
+ internal: internal.NewDestination(sys, archive, ref),
+ archive: archive,
}
}
// AddRepoTags adds the specified tags to the destination's repoTags.
func (d *Destination) AddRepoTags(tags []reference.NamedTagged) {
- d.repoTags = append(d.repoTags, tags...)
+ d.internal.AddRepoTags(tags)
}
// SupportedManifestMIMETypes tells which manifest mime types the destination supports
// If an empty slice or nil is returned, then any MIME type can be tried for upload
func (d *Destination) SupportedManifestMIMETypes() []string {
- return []string{
- manifest.DockerV2Schema2MediaType, // We rely on the types.Image.UpdatedImage schema conversion capabilities.
- }
+ return d.internal.SupportedManifestMIMETypes()
}
// SupportsSignatures returns an error (to be displayed to the user) if the destination certainly can't store signatures.
// Note: It is still possible for PutSignatures to fail if SupportsSignatures returns nil.
func (d *Destination) SupportsSignatures(ctx context.Context) error {
- return errors.Errorf("Storing signatures for docker tar files is not supported")
+ return d.internal.SupportsSignatures(ctx)
}
// AcceptsForeignLayerURLs returns false iff foreign layers in manifest should be actually
// uploaded to the image destination, true otherwise.
func (d *Destination) AcceptsForeignLayerURLs() bool {
- return false
+ return d.internal.AcceptsForeignLayerURLs()
}
// MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime architecture and OS. False otherwise.
func (d *Destination) MustMatchRuntimeOS() bool {
- return false
+ return d.internal.MustMatchRuntimeOS()
}
// IgnoresEmbeddedDockerReference returns true iff the destination does not care about Image.EmbeddedDockerReferenceConflicts(),
// and would prefer to receive an unmodified manifest instead of one modified for the destination.
// Does not make a difference if Reference().DockerReference() is nil.
func (d *Destination) IgnoresEmbeddedDockerReference() bool {
- return false // N/A, we only accept schema2 images where EmbeddedDockerReferenceConflicts() is always false.
+ return d.internal.IgnoresEmbeddedDockerReference()
}
// HasThreadSafePutBlob indicates whether PutBlob can be executed concurrently.
func (d *Destination) HasThreadSafePutBlob() bool {
- return false
+ return d.internal.HasThreadSafePutBlob()
}
// PutBlob writes contents of stream and returns data representing the result (with all data filled in).
@@ -104,66 +79,7 @@ func (d *Destination) HasThreadSafePutBlob() bool {
// to any other readers for download using the supplied digest.
// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
func (d *Destination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) {
- // Ouch, we need to stream the blob into a temporary file just to determine the size.
- // When the layer is decompressed, we also have to generate the digest on uncompressed datas.
- if inputInfo.Size == -1 || inputInfo.Digest.String() == "" {
- logrus.Debugf("docker tarfile: input with unknown size, streaming to disk first ...")
- streamCopy, err := ioutil.TempFile(tmpdir.TemporaryDirectoryForBigFiles(d.sysCtx), "docker-tarfile-blob")
- if err != nil {
- return types.BlobInfo{}, err
- }
- defer os.Remove(streamCopy.Name())
- defer streamCopy.Close()
-
- digester := digest.Canonical.Digester()
- tee := io.TeeReader(stream, digester.Hash())
- // TODO: This can take quite some time, and should ideally be cancellable using ctx.Done().
- size, err := io.Copy(streamCopy, tee)
- if err != nil {
- return types.BlobInfo{}, err
- }
- _, err = streamCopy.Seek(0, io.SeekStart)
- if err != nil {
- return types.BlobInfo{}, err
- }
- inputInfo.Size = size // inputInfo is a struct, so we are only modifying our copy.
- if inputInfo.Digest == "" {
- inputInfo.Digest = digester.Digest()
- }
- stream = streamCopy
- logrus.Debugf("... streaming done")
- }
-
- // Maybe the blob has been already sent
- ok, reusedInfo, err := d.TryReusingBlob(ctx, inputInfo, cache, false)
- if err != nil {
- return types.BlobInfo{}, err
- }
- if ok {
- return reusedInfo, nil
- }
-
- if isConfig {
- buf, err := iolimits.ReadAtMost(stream, iolimits.MaxConfigBodySize)
- if err != nil {
- return types.BlobInfo{}, errors.Wrap(err, "Error reading Config file stream")
- }
- d.config = buf
- if err := d.sendFile(inputInfo.Digest.Hex()+".json", inputInfo.Size, bytes.NewReader(buf)); err != nil {
- return types.BlobInfo{}, errors.Wrap(err, "Error writing Config file")
- }
- } else {
- // Note that this can't be e.g. filepath.Join(l.Digest.Hex(), legacyLayerFileName); due to the way
- // writeLegacyLayerMetadata constructs layer IDs differently from inputinfo.Digest values (as described
- // inside it), most of the layers would end up in subdirectories alone without any metadata; (docker load)
- // tries to load every subdirectory as an image and fails if the config is missing. So, keep the layers
- // in the root of the tarball.
- if err := d.sendFile(inputInfo.Digest.Hex()+".tar", inputInfo.Size, stream); err != nil {
- return types.BlobInfo{}, err
- }
- }
- d.blobs[inputInfo.Digest] = types.BlobInfo{Digest: inputInfo.Digest, Size: inputInfo.Size}
- return types.BlobInfo{Digest: inputInfo.Digest, Size: inputInfo.Size}, nil
+ return d.internal.PutBlob(ctx, stream, inputInfo, cache, isConfig)
}
// TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
@@ -174,33 +90,7 @@ func (d *Destination) PutBlob(ctx context.Context, stream io.Reader, inputInfo t
// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
// May use and/or update cache.
func (d *Destination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) {
- if info.Digest == "" {
- return false, types.BlobInfo{}, errors.Errorf("Can not check for a blob with unknown digest")
- }
- if blob, ok := d.blobs[info.Digest]; ok {
- return true, types.BlobInfo{Digest: info.Digest, Size: blob.Size}, nil
- }
- return false, types.BlobInfo{}, nil
-}
-
-func (d *Destination) createRepositoriesFile(rootLayerID string) error {
- repositories := map[string]map[string]string{}
- for _, repoTag := range d.repoTags {
- if val, ok := repositories[repoTag.Name()]; ok {
- val[repoTag.Tag()] = rootLayerID
- } else {
- repositories[repoTag.Name()] = map[string]string{repoTag.Tag(): rootLayerID}
- }
- }
-
- b, err := json.Marshal(repositories)
- if err != nil {
- return errors.Wrap(err, "Error marshaling repositories")
- }
- if err := d.sendBytes(legacyRepositoriesFileName, b); err != nil {
- return errors.Wrap(err, "Error writing config json file")
- }
- return nil
+ return d.internal.TryReusingBlob(ctx, info, cache, canSubstitute)
}
// PutManifest writes manifest to the destination.
@@ -210,215 +100,18 @@ func (d *Destination) createRepositoriesFile(rootLayerID string) error {
// If the destination is in principle available, refuses this manifest type (e.g. it does not recognize the schema),
// but may accept a different manifest type, the returned error must be a ManifestTypeRejectedError.
func (d *Destination) PutManifest(ctx context.Context, m []byte, instanceDigest *digest.Digest) error {
- if instanceDigest != nil {
- return errors.New(`Manifest lists are not supported for docker tar files`)
- }
- // We do not bother with types.ManifestTypeRejectedError; our .SupportedManifestMIMETypes() above is already providing only one alternative,
- // so the caller trying a different manifest kind would be pointless.
- var man manifest.Schema2
- if err := json.Unmarshal(m, &man); err != nil {
- return errors.Wrap(err, "Error parsing manifest")
- }
- if man.SchemaVersion != 2 || man.MediaType != manifest.DockerV2Schema2MediaType {
- return errors.Errorf("Unsupported manifest type, need a Docker schema 2 manifest")
- }
-
- layerPaths, lastLayerID, err := d.writeLegacyLayerMetadata(man.LayersDescriptors)
- if err != nil {
- return err
- }
-
- if len(man.LayersDescriptors) > 0 {
- if err := d.createRepositoriesFile(lastLayerID); err != nil {
- return err
- }
- }
-
- repoTags := []string{}
- for _, tag := range d.repoTags {
- // For github.com/docker/docker consumers, this works just as well as
- // refString := ref.String()
- // because when reading the RepoTags strings, github.com/docker/docker/reference
- // normalizes both of them to the same value.
- //
- // Doing it this way to include the normalized-out `docker.io[/library]` does make
- // a difference for github.com/projectatomic/docker consumers, with the
- // “Add --add-registry and --block-registry options to docker daemon” patch.
- // These consumers treat reference strings which include a hostname and reference
- // strings without a hostname differently.
- //
- // Using the host name here is more explicit about the intent, and it has the same
- // effect as (docker pull) in projectatomic/docker, which tags the result using
- // a hostname-qualified reference.
- // See https://github.com/containers/image/issues/72 for a more detailed
- // analysis and explanation.
- refString := fmt.Sprintf("%s:%s", tag.Name(), tag.Tag())
- repoTags = append(repoTags, refString)
- }
-
- items := []ManifestItem{{
- Config: man.ConfigDescriptor.Digest.Hex() + ".json",
- RepoTags: repoTags,
- Layers: layerPaths,
- Parent: "",
- LayerSources: nil,
- }}
- itemsBytes, err := json.Marshal(&items)
- if err != nil {
- return err
- }
-
- // FIXME? Do we also need to support the legacy format?
- return d.sendBytes(manifestFileName, itemsBytes)
-}
-
-// writeLegacyLayerMetadata writes legacy VERSION and configuration files for all layers
-func (d *Destination) writeLegacyLayerMetadata(layerDescriptors []manifest.Schema2Descriptor) (layerPaths []string, lastLayerID string, err error) {
- var chainID digest.Digest
- lastLayerID = ""
- for i, l := range layerDescriptors {
- // This chainID value matches the computation in docker/docker/layer.CreateChainID …
- if chainID == "" {
- chainID = l.Digest
- } else {
- chainID = digest.Canonical.FromString(chainID.String() + " " + l.Digest.String())
- }
- // … but note that this image ID does not match docker/docker/image/v1.CreateID. At least recent
- // versions allocate new IDs on load, as long as the IDs we use are unique / cannot loop.
- //
- // Overall, the goal of computing a digest dependent on the full history is to avoid reusing an image ID
- // (and possibly creating a loop in the "parent" links) if a layer with the same DiffID appears two or more
- // times in layersDescriptors. The ChainID values are sufficient for this, the v1.CreateID computation
- // which also mixes in the full image configuration seems unnecessary, at least as long as we are storing
- // only a single image per tarball, i.e. all DiffID prefixes are unique (can’t differ only with
- // configuration).
- layerID := chainID.Hex()
-
- physicalLayerPath := l.Digest.Hex() + ".tar"
- // The layer itself has been stored into physicalLayerPath in PutManifest.
- // So, use that path for layerPaths used in the non-legacy manifest
- layerPaths = append(layerPaths, physicalLayerPath)
- // ... and create a symlink for the legacy format;
- if err := d.sendSymlink(filepath.Join(layerID, legacyLayerFileName), filepath.Join("..", physicalLayerPath)); err != nil {
- return nil, "", errors.Wrap(err, "Error creating layer symbolic link")
- }
-
- b := []byte("1.0")
- if err := d.sendBytes(filepath.Join(layerID, legacyVersionFileName), b); err != nil {
- return nil, "", errors.Wrap(err, "Error writing VERSION file")
- }
-
- // The legacy format requires a config file per layer
- layerConfig := make(map[string]interface{})
- layerConfig["id"] = layerID
-
- // The root layer doesn't have any parent
- if lastLayerID != "" {
- layerConfig["parent"] = lastLayerID
- }
- // The root layer configuration file is generated by using subpart of the image configuration
- if i == len(layerDescriptors)-1 {
- var config map[string]*json.RawMessage
- err := json.Unmarshal(d.config, &config)
- if err != nil {
- return nil, "", errors.Wrap(err, "Error unmarshaling config")
- }
- for _, attr := range [7]string{"architecture", "config", "container", "container_config", "created", "docker_version", "os"} {
- layerConfig[attr] = config[attr]
- }
- }
- b, err := json.Marshal(layerConfig)
- if err != nil {
- return nil, "", errors.Wrap(err, "Error marshaling layer config")
- }
- if err := d.sendBytes(filepath.Join(layerID, legacyConfigFileName), b); err != nil {
- return nil, "", errors.Wrap(err, "Error writing config json file")
- }
-
- lastLayerID = layerID
- }
- return layerPaths, lastLayerID, nil
-}
-
-type tarFI struct {
- path string
- size int64
- isSymlink bool
-}
-
-func (t *tarFI) Name() string {
- return t.path
-}
-func (t *tarFI) Size() int64 {
- return t.size
-}
-func (t *tarFI) Mode() os.FileMode {
- if t.isSymlink {
- return os.ModeSymlink
- }
- return 0444
-}
-func (t *tarFI) ModTime() time.Time {
- return time.Unix(0, 0)
-}
-func (t *tarFI) IsDir() bool {
- return false
-}
-func (t *tarFI) Sys() interface{} {
- return nil
-}
-
-// sendSymlink sends a symlink into the tar stream.
-func (d *Destination) sendSymlink(path string, target string) error {
- hdr, err := tar.FileInfoHeader(&tarFI{path: path, size: 0, isSymlink: true}, target)
- if err != nil {
- return nil
- }
- logrus.Debugf("Sending as tar link %s -> %s", path, target)
- return d.tar.WriteHeader(hdr)
-}
-
-// sendBytes sends a path into the tar stream.
-func (d *Destination) sendBytes(path string, b []byte) error {
- return d.sendFile(path, int64(len(b)), bytes.NewReader(b))
-}
-
-// sendFile sends a file into the tar stream.
-func (d *Destination) sendFile(path string, expectedSize int64, stream io.Reader) error {
- hdr, err := tar.FileInfoHeader(&tarFI{path: path, size: expectedSize}, "")
- if err != nil {
- return nil
- }
- logrus.Debugf("Sending as tar file %s", path)
- if err := d.tar.WriteHeader(hdr); err != nil {
- return err
- }
- // TODO: This can take quite some time, and should ideally be cancellable using a context.Context.
- size, err := io.Copy(d.tar, stream)
- if err != nil {
- return err
- }
- if size != expectedSize {
- return errors.Errorf("Size mismatch when copying %s, expected %d, got %d", path, expectedSize, size)
- }
- return nil
+ return d.internal.PutManifest(ctx, m, instanceDigest)
}
// PutSignatures would add the given signatures to the docker tarfile (currently not supported).
// The instanceDigest value is expected to always be nil, because this transport does not support manifest lists, so
// there can be no secondary manifests. MUST be called after PutManifest (signatures reference manifest contents).
func (d *Destination) PutSignatures(ctx context.Context, signatures [][]byte, instanceDigest *digest.Digest) error {
- if instanceDigest != nil {
- return errors.Errorf(`Manifest lists are not supported for docker tar files`)
- }
- if len(signatures) != 0 {
- return errors.Errorf("Storing signatures for docker tar files is not supported")
- }
- return nil
+ return d.internal.PutSignatures(ctx, signatures, instanceDigest)
}
// Commit finishes writing data to the underlying io.Writer.
// It is the caller's responsibility to close it, if necessary.
func (d *Destination) Commit(ctx context.Context) error {
- return d.tar.Close()
+ return d.archive.Close()
}
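
After this rewrite the file is a thin facade: the public Destination keeps its stable API and forwards every method to the internal implementation, so the behavior lives in exactly one place. A compact sketch of that delegation pattern with hypothetical types:

package main

import "fmt"

type internalDest struct{}

func (internalDest) HasThreadSafePutBlob() bool { return false }

// Destination preserves the old exported surface while delegating the work.
type Destination struct {
	internal internalDest
}

func (d *Destination) HasThreadSafePutBlob() bool {
	return d.internal.HasThreadSafePutBlob()
}

func main() {
	d := &Destination{}
	fmt.Println(d.HasThreadSafePutBlob())
}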
diff --git a/vendor/github.com/containers/image/v5/docker/tarfile/src.go b/vendor/github.com/containers/image/v5/docker/tarfile/src.go
index 4d2368c70..ee341eb39 100644
--- a/vendor/github.com/containers/image/v5/docker/tarfile/src.go
+++ b/vendor/github.com/containers/image/v5/docker/tarfile/src.go
@@ -1,51 +1,20 @@
package tarfile
import (
- "archive/tar"
- "bytes"
"context"
- "encoding/json"
"io"
- "io/ioutil"
- "os"
- "path"
- "sync"
- "github.com/containers/image/v5/internal/iolimits"
- "github.com/containers/image/v5/internal/tmpdir"
- "github.com/containers/image/v5/manifest"
- "github.com/containers/image/v5/pkg/compression"
+ internal "github.com/containers/image/v5/docker/internal/tarfile"
"github.com/containers/image/v5/types"
digest "github.com/opencontainers/go-digest"
- "github.com/pkg/errors"
)
// Source is a partial implementation of types.ImageSource for reading from tarPath.
+// Most users should use this via implementations of ImageReference from docker/archive or docker/daemon.
type Source struct {
- tarPath string
- removeTarPathOnClose bool // Remove temp file on close if true
- // The following data is only available after ensureCachedDataIsPresent() succeeds
- tarManifest *ManifestItem // nil if not available yet.
- configBytes []byte
- configDigest digest.Digest
- orderedDiffIDList []digest.Digest
- knownLayers map[digest.Digest]*layerInfo
- // Other state
- generatedManifest []byte // Private cache for GetManifest(), nil if not set yet.
- cacheDataLock sync.Once // Private state for ensureCachedDataIsPresent to make it concurrency-safe
- cacheDataResult error // Private state for ensureCachedDataIsPresent
+ internal *internal.Source
}
-type layerInfo struct {
- path string
- size int64
-}
-
-// TODO: We could add support for multiple images in a single archive, so
-// that people could use docker-archive:opensuse.tar:opensuse:leap as
-// the source of an image.
-// To do for both the NewSourceFromFile and NewSourceFromStream functions
-
// NewSourceFromFile returns a tarfile.Source for the specified path.
// Deprecated: Please use NewSourceFromFileWithContext, which allows you to configure the temp directory
// for big files through SystemContext.BigFilesTemporaryDir
@@ -55,25 +24,12 @@ func NewSourceFromFile(path string) (*Source, error) {
// NewSourceFromFileWithContext returns a tarfile.Source for the specified path.
func NewSourceFromFileWithContext(sys *types.SystemContext, path string) (*Source, error) {
- file, err := os.Open(path)
- if err != nil {
- return nil, errors.Wrapf(err, "error opening file %q", path)
- }
- defer file.Close()
-
- // If the file is already not compressed we can just return the file itself
- // as a source. Otherwise we pass the stream to NewSourceFromStream.
- stream, isCompressed, err := compression.AutoDecompress(file)
+ archive, err := internal.NewReaderFromFile(sys, path)
if err != nil {
- return nil, errors.Wrapf(err, "Error detecting compression for file %q", path)
- }
- defer stream.Close()
- if !isCompressed {
- return &Source{
- tarPath: path,
- }, nil
+ return nil, err
}
- return NewSourceFromStreamWithSystemContext(sys, stream)
+ src := internal.NewSource(archive, true, nil, -1)
+ return &Source{internal: src}, nil
}
// NewSourceFromStream returns a tarfile.Source for the specified inputStream,
@@ -89,280 +45,22 @@ func NewSourceFromStream(inputStream io.Reader) (*Source, error) {
// which can be either compressed or uncompressed. The caller can close the
// inputStream immediately after this function returns.
func NewSourceFromStreamWithSystemContext(sys *types.SystemContext, inputStream io.Reader) (*Source, error) {
- // FIXME: use SystemContext here.
- // Save inputStream to a temporary file
- tarCopyFile, err := ioutil.TempFile(tmpdir.TemporaryDirectoryForBigFiles(sys), "docker-tar")
- if err != nil {
- return nil, errors.Wrap(err, "error creating temporary file")
- }
- defer tarCopyFile.Close()
-
- succeeded := false
- defer func() {
- if !succeeded {
- os.Remove(tarCopyFile.Name())
- }
- }()
-
- // In order to be compatible with docker-load, we need to support
- // auto-decompression (it's also a nice quality-of-life thing to avoid
- // giving users really confusing "invalid tar header" errors).
- uncompressedStream, _, err := compression.AutoDecompress(inputStream)
- if err != nil {
- return nil, errors.Wrap(err, "Error auto-decompressing input")
- }
- defer uncompressedStream.Close()
-
- // Copy the plain archive to the temporary file.
- //
- // TODO: This can take quite some time, and should ideally be cancellable
- // using a context.Context.
- if _, err := io.Copy(tarCopyFile, uncompressedStream); err != nil {
- return nil, errors.Wrapf(err, "error copying contents to temporary file %q", tarCopyFile.Name())
- }
- succeeded = true
-
- return &Source{
- tarPath: tarCopyFile.Name(),
- removeTarPathOnClose: true,
- }, nil
-}
-
-// tarReadCloser is a way to close the backing file of a tar.Reader when the user no longer needs the tar component.
-type tarReadCloser struct {
- *tar.Reader
- backingFile *os.File
-}
-
-func (t *tarReadCloser) Close() error {
- return t.backingFile.Close()
-}
-
-// openTarComponent returns a ReadCloser for the specific file within the archive.
-// This is linear scan; we assume that the tar file will have a fairly small amount of files (~layers),
-// and that filesystem caching will make the repeated seeking over the (uncompressed) tarPath cheap enough.
-// The caller should call .Close() on the returned stream.
-func (s *Source) openTarComponent(componentPath string) (io.ReadCloser, error) {
- f, err := os.Open(s.tarPath)
- if err != nil {
- return nil, err
- }
- succeeded := false
- defer func() {
- if !succeeded {
- f.Close()
- }
- }()
-
- tarReader, header, err := findTarComponent(f, componentPath)
- if err != nil {
- return nil, err
- }
- if header == nil {
- return nil, os.ErrNotExist
- }
- if header.FileInfo().Mode()&os.ModeType == os.ModeSymlink { // FIXME: untested
- // We follow only one symlink; so no loops are possible.
- if _, err := f.Seek(0, io.SeekStart); err != nil {
- return nil, err
- }
- // The new path could easily point "outside" the archive, but we only compare it to existing tar headers without extracting the archive,
- // so we don't care.
- tarReader, header, err = findTarComponent(f, path.Join(path.Dir(componentPath), header.Linkname))
- if err != nil {
- return nil, err
- }
- if header == nil {
- return nil, os.ErrNotExist
- }
- }
-
- if !header.FileInfo().Mode().IsRegular() {
- return nil, errors.Errorf("Error reading tar archive component %s: not a regular file", header.Name)
- }
- succeeded = true
- return &tarReadCloser{Reader: tarReader, backingFile: f}, nil
-}
-
-// findTarComponent returns a header and a reader matching path within inputFile,
-// or (nil, nil, nil) if not found.
-func findTarComponent(inputFile io.Reader, path string) (*tar.Reader, *tar.Header, error) {
- t := tar.NewReader(inputFile)
- for {
- h, err := t.Next()
- if err == io.EOF {
- break
- }
- if err != nil {
- return nil, nil, err
- }
- if h.Name == path {
- return t, h, nil
- }
- }
- return nil, nil, nil
-}
-
-// readTarComponent returns full contents of componentPath.
-func (s *Source) readTarComponent(path string, limit int) ([]byte, error) {
- file, err := s.openTarComponent(path)
- if err != nil {
- return nil, errors.Wrapf(err, "Error loading tar component %s", path)
- }
- defer file.Close()
- bytes, err := iolimits.ReadAtMost(file, limit)
- if err != nil {
- return nil, err
- }
- return bytes, nil
-}
-
-// ensureCachedDataIsPresent loads data necessary for any of the public accessors.
-// It is safe to call this from multi-threaded code.
-func (s *Source) ensureCachedDataIsPresent() error {
- s.cacheDataLock.Do(func() {
- s.cacheDataResult = s.ensureCachedDataIsPresentPrivate()
- })
- return s.cacheDataResult
-}
-
-// ensureCachedDataIsPresentPrivate is a private implementation detail of ensureCachedDataIsPresent.
-// Call ensureCachedDataIsPresent instead.
-func (s *Source) ensureCachedDataIsPresentPrivate() error {
- // Read and parse manifest.json
- tarManifest, err := s.loadTarManifest()
- if err != nil {
- return err
- }
-
- // Check to make sure length is 1
- if len(tarManifest) != 1 {
- return errors.Errorf("Unexpected tar manifest.json: expected 1 item, got %d", len(tarManifest))
- }
-
- // Read and parse config.
- configBytes, err := s.readTarComponent(tarManifest[0].Config, iolimits.MaxConfigBodySize)
- if err != nil {
- return err
- }
- var parsedConfig manifest.Schema2Image // There's a lot of info there, but we only really care about layer DiffIDs.
- if err := json.Unmarshal(configBytes, &parsedConfig); err != nil {
- return errors.Wrapf(err, "Error decoding tar config %s", tarManifest[0].Config)
- }
- if parsedConfig.RootFS == nil {
- return errors.Errorf("Invalid image config (rootFS is not set): %s", tarManifest[0].Config)
- }
-
- knownLayers, err := s.prepareLayerData(&tarManifest[0], &parsedConfig)
- if err != nil {
- return err
- }
-
- // Success; commit.
- s.tarManifest = &tarManifest[0]
- s.configBytes = configBytes
- s.configDigest = digest.FromBytes(configBytes)
- s.orderedDiffIDList = parsedConfig.RootFS.DiffIDs
- s.knownLayers = knownLayers
- return nil
-}
-
-// loadTarManifest loads and decodes the manifest.json.
-func (s *Source) loadTarManifest() ([]ManifestItem, error) {
- // FIXME? Do we need to deal with the legacy format?
- bytes, err := s.readTarComponent(manifestFileName, iolimits.MaxTarFileManifestSize)
+ archive, err := internal.NewReaderFromStream(sys, inputStream)
if err != nil {
return nil, err
}
- var items []ManifestItem
- if err := json.Unmarshal(bytes, &items); err != nil {
- return nil, errors.Wrap(err, "Error decoding tar manifest.json")
- }
- return items, nil
+ src := internal.NewSource(archive, true, nil, -1)
+ return &Source{internal: src}, nil
}
// Close removes resources associated with an initialized Source, if any.
func (s *Source) Close() error {
- if s.removeTarPathOnClose {
- return os.Remove(s.tarPath)
- }
- return nil
+ return s.internal.Close()
}
// LoadTarManifest loads and decodes the manifest.json
func (s *Source) LoadTarManifest() ([]ManifestItem, error) {
- return s.loadTarManifest()
-}
-
-func (s *Source) prepareLayerData(tarManifest *ManifestItem, parsedConfig *manifest.Schema2Image) (map[digest.Digest]*layerInfo, error) {
- // Collect layer data available in manifest and config.
- if len(tarManifest.Layers) != len(parsedConfig.RootFS.DiffIDs) {
- return nil, errors.Errorf("Inconsistent layer count: %d in manifest, %d in config", len(tarManifest.Layers), len(parsedConfig.RootFS.DiffIDs))
- }
- knownLayers := map[digest.Digest]*layerInfo{}
- unknownLayerSizes := map[string]*layerInfo{} // Points into knownLayers, a "to do list" of items with unknown sizes.
- for i, diffID := range parsedConfig.RootFS.DiffIDs {
- if _, ok := knownLayers[diffID]; ok {
- // Apparently it really can happen that a single image contains the same layer diff more than once.
- // In that case, the diffID validation ensures that both layers truly are the same, and it should not matter
- // which of the tarManifest.Layers paths is used; (docker save) actually makes the duplicates symlinks to the original.
- continue
- }
- layerPath := tarManifest.Layers[i]
- if _, ok := unknownLayerSizes[layerPath]; ok {
- return nil, errors.Errorf("Layer tarfile %s used for two different DiffID values", layerPath)
- }
- li := &layerInfo{ // A new element in each iteration
- path: layerPath,
- size: -1,
- }
- knownLayers[diffID] = li
- unknownLayerSizes[layerPath] = li
- }
-
- // Scan the tar file to collect layer sizes.
- file, err := os.Open(s.tarPath)
- if err != nil {
- return nil, err
- }
- defer file.Close()
- t := tar.NewReader(file)
- for {
- h, err := t.Next()
- if err == io.EOF {
- break
- }
- if err != nil {
- return nil, err
- }
- if li, ok := unknownLayerSizes[h.Name]; ok {
- // Since GetBlob will decompress layers that are compressed we need
- // to do the decompression here as well, otherwise we will
- // incorrectly report the size. Pretty critical, since tools like
- // umoci always compress layer blobs. Obviously we only bother with
- // the slower method of checking if it's compressed.
- uncompressedStream, isCompressed, err := compression.AutoDecompress(t)
- if err != nil {
- return nil, errors.Wrapf(err, "Error auto-decompressing %s to determine its size", h.Name)
- }
- defer uncompressedStream.Close()
-
- uncompressedSize := h.Size
- if isCompressed {
- uncompressedSize, err = io.Copy(ioutil.Discard, uncompressedStream)
- if err != nil {
- return nil, errors.Wrapf(err, "Error reading %s to find its size", h.Name)
- }
- }
- li.size = uncompressedSize
- delete(unknownLayerSizes, h.Name)
- }
- }
- if len(unknownLayerSizes) != 0 {
- return nil, errors.Errorf("Some layer tarfiles are missing in the tarball") // This could do with a better error reporting, if this ever happened in practice.
- }
-
- return knownLayers, nil
+ return s.internal.TarManifest(), nil
}
// GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available).
@@ -372,130 +70,26 @@ func (s *Source) prepareLayerData(tarManifest *ManifestItem, parsedConfig *manif
// This source implementation does not support manifest lists, so the passed-in instanceDigest should always be nil,
// as the primary manifest can not be a list, so there can be no secondary instances.
func (s *Source) GetManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, string, error) {
- if instanceDigest != nil {
- // How did we even get here? GetManifest(ctx, nil) has returned a manifest.DockerV2Schema2MediaType.
- return nil, "", errors.New(`Manifest lists are not supported by "docker-daemon:"`)
- }
- if s.generatedManifest == nil {
- if err := s.ensureCachedDataIsPresent(); err != nil {
- return nil, "", err
- }
- m := manifest.Schema2{
- SchemaVersion: 2,
- MediaType: manifest.DockerV2Schema2MediaType,
- ConfigDescriptor: manifest.Schema2Descriptor{
- MediaType: manifest.DockerV2Schema2ConfigMediaType,
- Size: int64(len(s.configBytes)),
- Digest: s.configDigest,
- },
- LayersDescriptors: []manifest.Schema2Descriptor{},
- }
- for _, diffID := range s.orderedDiffIDList {
- li, ok := s.knownLayers[diffID]
- if !ok {
- return nil, "", errors.Errorf("Internal inconsistency: Information about layer %s missing", diffID)
- }
- m.LayersDescriptors = append(m.LayersDescriptors, manifest.Schema2Descriptor{
- Digest: diffID, // diffID is a digest of the uncompressed tarball
- MediaType: manifest.DockerV2Schema2LayerMediaType,
- Size: li.size,
- })
- }
- manifestBytes, err := json.Marshal(&m)
- if err != nil {
- return nil, "", err
- }
- s.generatedManifest = manifestBytes
- }
- return s.generatedManifest, manifest.DockerV2Schema2MediaType, nil
-}
-
-// uncompressedReadCloser is an io.ReadCloser that closes both the uncompressed stream and the underlying input.
-type uncompressedReadCloser struct {
- io.Reader
- underlyingCloser func() error
- uncompressedCloser func() error
-}
-
-func (r uncompressedReadCloser) Close() error {
- var res error
- if err := r.uncompressedCloser(); err != nil {
- res = err
- }
- if err := r.underlyingCloser(); err != nil && res == nil {
- res = err
- }
- return res
+ return s.internal.GetManifest(ctx, instanceDigest)
}
// HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently.
func (s *Source) HasThreadSafeGetBlob() bool {
- return true
+ return s.internal.HasThreadSafeGetBlob()
}
// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown).
// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location.
func (s *Source) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) {
- if err := s.ensureCachedDataIsPresent(); err != nil {
- return nil, 0, err
- }
-
- if info.Digest == s.configDigest { // FIXME? Implement a more general algorithm matching instead of assuming sha256.
- return ioutil.NopCloser(bytes.NewReader(s.configBytes)), int64(len(s.configBytes)), nil
- }
-
- if li, ok := s.knownLayers[info.Digest]; ok { // diffID is a digest of the uncompressed tarball,
- underlyingStream, err := s.openTarComponent(li.path)
- if err != nil {
- return nil, 0, err
- }
- closeUnderlyingStream := true
- defer func() {
- if closeUnderlyingStream {
- underlyingStream.Close()
- }
- }()
-
- // In order to handle the fact that digests != diffIDs (and thus that a
- // caller which is trying to verify the blob will run into problems),
- // we need to decompress blobs. This is a bit ugly, but it's a
- // consequence of making everything addressable by their DiffID rather
- // than by their digest...
- //
- // In particular, because the v2s2 manifest being generated uses
- // DiffIDs, any caller of GetBlob is going to be asking for DiffIDs of
- // layers not their _actual_ digest. The result is that copy/... will
- // be verifing a "digest" which is not the actual layer's digest (but
- // is instead the DiffID).
-
- uncompressedStream, _, err := compression.AutoDecompress(underlyingStream)
- if err != nil {
- return nil, 0, errors.Wrapf(err, "Error auto-decompressing blob %s", info.Digest)
- }
-
- newStream := uncompressedReadCloser{
- Reader: uncompressedStream,
- underlyingCloser: underlyingStream.Close,
- uncompressedCloser: uncompressedStream.Close,
- }
- closeUnderlyingStream = false
-
- return newStream, li.size, nil
- }
-
- return nil, 0, errors.Errorf("Unknown blob %s", info.Digest)
+ return s.internal.GetBlob(ctx, info, cache)
}
// GetSignatures returns the image's signatures. It may use a remote (= slow) service.
// This source implementation does not support manifest lists, so the passed-in instanceDigest should always be nil,
// as there can be no secondary manifests.
func (s *Source) GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) {
- if instanceDigest != nil {
- // How did we even get here? GetManifest(ctx, nil) has returned a manifest.DockerV2Schema2MediaType.
- return nil, errors.Errorf(`Manifest lists are not supported by "docker-daemon:"`)
- }
- return [][]byte{}, nil
+ return s.internal.GetSignatures(ctx, instanceDigest)
}
// LayerInfosForCopy returns either nil (meaning the values in the manifest are fine), or updated values for the layer
@@ -505,6 +99,6 @@ func (s *Source) GetSignatures(ctx context.Context, instanceDigest *digest.Diges
// as the primary manifest can not be a list, so there can be no secondary manifests.
// The Digest field is guaranteed to be provided; Size may be -1.
// WARNING: The list may contain duplicates, and they are semantically relevant.
-func (s *Source) LayerInfosForCopy(context.Context, *digest.Digest) ([]types.BlobInfo, error) {
- return nil, nil
+func (s *Source) LayerInfosForCopy(ctx context.Context, instanceDigest *digest.Digest) ([]types.BlobInfo, error) {
+ return s.internal.LayerInfosForCopy(ctx, instanceDigest)
}
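
A hedged usage sketch of the public wrapper above; it compiles only inside a module that vendors containers/image/v5, and the archive path is hypothetical:

package main

import (
	"context"
	"fmt"

	"github.com/containers/image/v5/docker/tarfile"
)

func main() {
	// nil SystemContext: defaults are used for the temporary directory.
	src, err := tarfile.NewSourceFromFileWithContext(nil, "/tmp/alpine.tar")
	if err != nil {
		panic(err)
	}
	defer src.Close()

	// The manifest is synthesized as Docker schema2 from the archive contents.
	m, mime, err := src.GetManifest(context.Background(), nil)
	if err != nil {
		panic(err)
	}
	fmt.Println(mime, len(m))
}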
diff --git a/vendor/github.com/containers/image/v5/docker/tarfile/types.go b/vendor/github.com/containers/image/v5/docker/tarfile/types.go
index ac222528a..0f14389e6 100644
--- a/vendor/github.com/containers/image/v5/docker/tarfile/types.go
+++ b/vendor/github.com/containers/image/v5/docker/tarfile/types.go
@@ -1,28 +1,8 @@
package tarfile
import (
- "github.com/containers/image/v5/manifest"
- "github.com/opencontainers/go-digest"
-)
-
-// Various data structures.
-
-// Based on github.com/docker/docker/image/tarexport/tarexport.go
-const (
- manifestFileName = "manifest.json"
- legacyLayerFileName = "layer.tar"
- legacyConfigFileName = "json"
- legacyVersionFileName = "VERSION"
- legacyRepositoriesFileName = "repositories"
+ internal "github.com/containers/image/v5/docker/internal/tarfile"
)
// ManifestItem is an element of the array stored in the top-level manifest.json file.
-type ManifestItem struct {
- Config string
- RepoTags []string
- Layers []string
- Parent imageID `json:",omitempty"`
- LayerSources map[digest.Digest]manifest.Schema2Descriptor `json:",omitempty"`
-}
-
-type imageID string
+type ManifestItem = internal.ManifestItem // All public members from the internal package remain accessible.
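
The `=` makes this an alias rather than a new named type, so internal and public values stay interchangeable without conversion and existing callers keep compiling. A tiny self-contained demonstration with hypothetical types:

package main

import "fmt"

type internalItem struct{ Config string }

// PublicItem is an alias, not a definition: both names denote the same type.
type PublicItem = internalItem

func main() {
	var p PublicItem = internalItem{Config: "cfg.json"} // no conversion needed
	fmt.Println(p.Config)
}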
diff --git a/vendor/github.com/containers/image/v5/oci/layout/oci_src.go b/vendor/github.com/containers/image/v5/oci/layout/oci_src.go
index f515203df..9925aeda7 100644
--- a/vendor/github.com/containers/image/v5/oci/layout/oci_src.go
+++ b/vendor/github.com/containers/image/v5/oci/layout/oci_src.go
@@ -141,6 +141,10 @@ func (s *ociImageSource) GetSignatures(ctx context.Context, instanceDigest *dige
}
func (s *ociImageSource) getExternalBlob(ctx context.Context, urls []string) (io.ReadCloser, int64, error) {
+ if len(urls) == 0 {
+ return nil, 0, errors.New("internal error: getExternalBlob called with no URLs")
+ }
+
errWrap := errors.New("failed fetching external blob from all urls")
for _, url := range urls {
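
The added guard fails fast on an empty URL list before the existing loop tries each URL and accumulates a wrapped error trail. A stdlib sketch of the combined shape; fetch is a hypothetical stand-in for the real HTTP fetch:

package main

import (
	"errors"
	"fmt"
)

func fetch(url string) error { return fmt.Errorf("unreachable %s", url) }

func firstReachable(urls []string) error {
	if len(urls) == 0 {
		return errors.New("internal error: called with no URLs")
	}
	errWrap := errors.New("failed fetching external blob from all urls")
	for _, u := range urls {
		if err := fetch(u); err != nil {
			errWrap = fmt.Errorf("%v: %w", err, errWrap)
			continue
		}
		return nil // first success wins
	}
	return errWrap
}

func main() {
	fmt.Println(firstReachable([]string{"https://example.invalid/blob"}))
}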
diff --git a/vendor/github.com/containers/image/v5/pkg/docker/config/config.go b/vendor/github.com/containers/image/v5/pkg/docker/config/config.go
index ce85af18a..5d7598648 100644
--- a/vendor/github.com/containers/image/v5/pkg/docker/config/config.go
+++ b/vendor/github.com/containers/image/v5/pkg/docker/config/config.go
@@ -11,9 +11,9 @@ import (
"strings"
"github.com/containers/image/v5/types"
+ "github.com/containers/storage/pkg/homedir"
helperclient "github.com/docker/docker-credential-helpers/client"
"github.com/docker/docker-credential-helpers/credentials"
- "github.com/docker/docker/pkg/homedir"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -345,7 +345,7 @@ func modifyJSON(sys *types.SystemContext, editor func(auths *dockerConfigFile) (
return errors.Wrapf(err, "error marshaling JSON %q", path)
}
- if err = ioutil.WriteFile(path, newData, 0755); err != nil {
+ if err = ioutil.WriteFile(path, newData, 0600); err != nil {
return errors.Wrapf(err, "error writing to file %q", path)
}
}
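
The mode change matters because this file can hold registry credentials: 0600 keeps it owner read/write only, instead of the world-readable (and spuriously executable) 0755. A minimal illustration with a hypothetical path and contents:

package main

import (
	"io/ioutil"
	"log"
)

func main() {
	// 0600: only the owning user can read or modify the stored credentials.
	if err := ioutil.WriteFile("/tmp/auth.json", []byte(`{"auths":{}}`), 0600); err != nil {
		log.Fatal(err)
	}
}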
diff --git a/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/system_registries_v2.go b/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/system_registries_v2.go
index 8ecb47de4..9a5712654 100644
--- a/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/system_registries_v2.go
+++ b/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/system_registries_v2.go
@@ -338,55 +338,86 @@ func (config *V2RegistriesConf) postProcess() error {
}
// ConfigPath returns the path to the system-wide registry configuration file.
+// Deprecated: This API implies configuration is read from files, and that there is only one.
+// Please use ConfigurationSourceDescription to obtain a string usable for error messages.
func ConfigPath(ctx *types.SystemContext) string {
- if ctx != nil && ctx.SystemRegistriesConfPath != "" {
- return ctx.SystemRegistriesConfPath
- }
-
- userRegistriesFilePath := filepath.Join(homedir.Get(), userRegistriesFile)
- if _, err := os.Stat(userRegistriesFilePath); err == nil {
- return userRegistriesFilePath
- }
-
- if ctx != nil && ctx.RootForImplicitAbsolutePaths != "" {
- return filepath.Join(ctx.RootForImplicitAbsolutePaths, systemRegistriesConfPath)
- }
-
- return systemRegistriesConfPath
+ return newConfigWrapper(ctx).configPath
}
-// ConfigDirPath returns the path to the system-wide directory for drop-in
+// ConfigDirPath returns the path to the directory for drop-in
// registry configuration files.
+// Deprecated: This API implies configuration is read from directories, and that there is only one.
+// Please use ConfigurationSourceDescription to obtain a string usable for error messages.
func ConfigDirPath(ctx *types.SystemContext) string {
- if ctx != nil && ctx.SystemRegistriesConfDirPath != "" {
- return ctx.SystemRegistriesConfDirPath
+ configWrapper := newConfigWrapper(ctx)
+ if configWrapper.userConfigDirPath != "" {
+ return configWrapper.userConfigDirPath
}
-
- userRegistriesDirPath := filepath.Join(homedir.Get(), userRegistriesDir)
- if _, err := os.Stat(userRegistriesDirPath); err == nil {
- return userRegistriesDirPath
- }
-
- if ctx != nil && ctx.RootForImplicitAbsolutePaths != "" {
- return filepath.Join(ctx.RootForImplicitAbsolutePaths, systemRegistriesConfDirPath)
- }
-
- return systemRegistriesConfDirPath
+ return configWrapper.configDirPath
}
// configWrapper is used to store the paths from ConfigPath and ConfigDirPath
// and acts as a key to the internal cache.
type configWrapper struct {
- configPath string
+ // path to the registries.conf file
+ configPath string
+ // path to system-wide registries.conf.d directory, or "" if not used
configDirPath string
+	// path to user-specified registries.conf.d directory, or "" if not used
+ userConfigDirPath string
}
// newConfigWrapper returns a configWrapper for the specified SystemContext.
func newConfigWrapper(ctx *types.SystemContext) configWrapper {
- return configWrapper{
- configPath: ConfigPath(ctx),
- configDirPath: ConfigDirPath(ctx),
+ var wrapper configWrapper
+ userRegistriesFilePath := filepath.Join(homedir.Get(), userRegistriesFile)
+ userRegistriesDirPath := filepath.Join(homedir.Get(), userRegistriesDir)
+
+ // decide configPath using per-user path or system file
+ if ctx != nil && ctx.SystemRegistriesConfPath != "" {
+ wrapper.configPath = ctx.SystemRegistriesConfPath
+ } else if _, err := os.Stat(userRegistriesFilePath); err == nil {
+ // per-user registries.conf exists, not reading system dir
+ // return config dirs from ctx or per-user one
+ wrapper.configPath = userRegistriesFilePath
+ if ctx != nil && ctx.SystemRegistriesConfDirPath != "" {
+ wrapper.configDirPath = ctx.SystemRegistriesConfDirPath
+ } else {
+ wrapper.userConfigDirPath = userRegistriesDirPath
+ }
+ return wrapper
+ } else if ctx != nil && ctx.RootForImplicitAbsolutePaths != "" {
+ wrapper.configPath = filepath.Join(ctx.RootForImplicitAbsolutePaths, systemRegistriesConfPath)
+ } else {
+ wrapper.configPath = systemRegistriesConfPath
+ }
+
+ // potentially use both system and per-user dirs if not using per-user config file
+ if ctx != nil && ctx.SystemRegistriesConfDirPath != "" {
+ // dir explicitly chosen: use only that one
+ wrapper.configDirPath = ctx.SystemRegistriesConfDirPath
+ } else if ctx != nil && ctx.RootForImplicitAbsolutePaths != "" {
+ wrapper.configDirPath = filepath.Join(ctx.RootForImplicitAbsolutePaths, systemRegistriesConfDirPath)
+ wrapper.userConfigDirPath = userRegistriesDirPath
+ } else {
+ wrapper.configDirPath = systemRegistriesConfDirPath
+ wrapper.userConfigDirPath = userRegistriesDirPath
}
+
+ return wrapper
+}
+
+// ConfigurationSourceDescription returns a string containing the paths of registries.conf and registries.conf.d
+func ConfigurationSourceDescription(ctx *types.SystemContext) string {
+ wrapper := newConfigWrapper(ctx)
+ configSources := []string{wrapper.configPath}
+ if wrapper.configDirPath != "" {
+ configSources = append(configSources, wrapper.configDirPath)
+ }
+ if wrapper.userConfigDirPath != "" {
+ configSources = append(configSources, wrapper.userConfigDirPath)
+ }
+ return strings.Join(configSources, ", ")
}
// configMutex is used to synchronize concurrent accesses to configCache.
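
ConfigurationSourceDescription is the intended replacement for the two deprecated getters in error text, since it folds all three possible sources into one string. A caller-side sketch against the API introduced above:

    package main

    import (
    	"fmt"

    	"github.com/containers/image/v5/pkg/sysregistriesv2"
    	"github.com/containers/image/v5/types"
    )

    func main() {
    	var sys *types.SystemContext // nil: use the defaults
    	// Prints registries.conf plus any system-wide and per-user
    	// registries.conf.d directories, comma-separated.
    	fmt.Printf("registry %q not found in %s\n",
    		"example.io", sysregistriesv2.ConfigurationSourceDescription(sys))
    }
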
@@ -422,39 +453,49 @@ func getConfig(ctx *types.SystemContext) (*V2RegistriesConf, error) {
// dropInConfigs returns a slice of drop-in-configs from the registries.conf.d
// directory.
func dropInConfigs(wrapper configWrapper) ([]string, error) {
- var configs []string
-
- err := filepath.Walk(wrapper.configDirPath,
- // WalkFunc to read additional configs
- func(path string, info os.FileInfo, err error) error {
- switch {
- case err != nil:
- // return error (could be a permission problem)
- return err
- case info == nil:
- // this should only happen when err != nil but let's be sure
- return nil
- case info.IsDir():
- if path != wrapper.configDirPath {
- // make sure to not recurse into sub-directories
- return filepath.SkipDir
- }
- // ignore directories
- return nil
- default:
- // only add *.conf files
- if strings.HasSuffix(path, ".conf") {
- configs = append(configs, path)
- }
- return nil
- }
- },
+ var (
+ configs []string
+ dirPaths []string
)
+ if wrapper.configDirPath != "" {
+ dirPaths = append(dirPaths, wrapper.configDirPath)
+ }
+ if wrapper.userConfigDirPath != "" {
+ dirPaths = append(dirPaths, wrapper.userConfigDirPath)
+ }
+ for _, dirPath := range dirPaths {
+ err := filepath.Walk(dirPath,
+ // WalkFunc to read additional configs
+ func(path string, info os.FileInfo, err error) error {
+ switch {
+ case err != nil:
+ // return error (could be a permission problem)
+ return err
+ case info == nil:
+ // this should only happen when err != nil but let's be sure
+ return nil
+ case info.IsDir():
+ if path != dirPath {
+ // make sure to not recurse into sub-directories
+ return filepath.SkipDir
+ }
+ // ignore directories
+ return nil
+ default:
+ // only add *.conf files
+ if strings.HasSuffix(path, ".conf") {
+ configs = append(configs, path)
+ }
+ return nil
+ }
+ },
+ )
- if err != nil && !os.IsNotExist(err) {
- // Ignore IsNotExist errors: most systems won't have a registries.conf.d
- // directory.
- return nil, errors.Wrapf(err, "error reading registries.conf.d")
+ if err != nil && !os.IsNotExist(err) {
+ // Ignore IsNotExist errors: most systems won't have a registries.conf.d
+ // directory.
+ return nil, errors.Wrapf(err, "error reading registries.conf.d")
+ }
}
return configs, nil
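
The reworked dropInConfigs applies the same one-level, *.conf-only scan to each configured directory in turn, tolerating directories that do not exist. A standalone sketch of that scan (paths illustrative):

    package main

    import (
    	"fmt"
    	"os"
    	"path/filepath"
    	"strings"
    )

    func listConfs(dirs ...string) ([]string, error) {
    	var configs []string
    	for _, dir := range dirs {
    		err := filepath.Walk(dir, func(path string, info os.FileInfo, err error) error {
    			switch {
    			case err != nil:
    				return err
    			case info.IsDir():
    				if path != dir {
    					return filepath.SkipDir // one level only, no recursion
    				}
    				return nil
    			default:
    				if strings.HasSuffix(path, ".conf") {
    					configs = append(configs, path)
    				}
    				return nil
    			}
    		})
    		if err != nil && !os.IsNotExist(err) {
    			return nil, err // a missing drop-in directory is not an error
    		}
    	}
    	return configs, nil
    }

    func main() {
    	confs, err := listConfs("/etc/containers/registries.conf.d")
    	fmt.Println(confs, err)
    }
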
diff --git a/vendor/github.com/containers/image/v5/version/version.go b/vendor/github.com/containers/image/v5/version/version.go
index 2f56effae..f4f902386 100644
--- a/vendor/github.com/containers/image/v5/version/version.go
+++ b/vendor/github.com/containers/image/v5/version/version.go
@@ -11,7 +11,7 @@ const (
VersionPatch = 2
// VersionDev indicates development branch. Releases will be empty string.
- VersionDev = ""
+ VersionDev = "-dev"
)
// Version is the specification version that the package types support.
diff --git a/vendor/github.com/containers/storage/VERSION b/vendor/github.com/containers/storage/VERSION
index 14bee92c9..ca8ec414e 100644
--- a/vendor/github.com/containers/storage/VERSION
+++ b/vendor/github.com/containers/storage/VERSION
@@ -1 +1 @@
-1.23.2
+1.23.5
diff --git a/vendor/github.com/containers/storage/go.mod b/vendor/github.com/containers/storage/go.mod
index 77eef7598..39db66641 100644
--- a/vendor/github.com/containers/storage/go.mod
+++ b/vendor/github.com/containers/storage/go.mod
@@ -8,10 +8,11 @@ require (
github.com/Microsoft/hcsshim v0.8.9
github.com/docker/go-units v0.4.0
github.com/hashicorp/go-multierror v1.1.0
- github.com/klauspost/compress v1.10.11
- github.com/klauspost/pgzip v1.2.4
+ github.com/klauspost/compress v1.11.0
+ github.com/klauspost/pgzip v1.2.5
github.com/mattn/go-shellwords v1.0.10
github.com/mistifyio/go-zfs v2.1.1+incompatible
+ github.com/moby/sys/mountinfo v0.1.3
github.com/opencontainers/go-digest v1.0.0
github.com/opencontainers/runc v1.0.0-rc91
github.com/opencontainers/runtime-spec v1.0.3-0.20200520003142-237cc4f519e2
diff --git a/vendor/github.com/containers/storage/go.sum b/vendor/github.com/containers/storage/go.sum
index 04d48eb4f..d1fb711b1 100644
--- a/vendor/github.com/containers/storage/go.sum
+++ b/vendor/github.com/containers/storage/go.sum
@@ -62,10 +62,10 @@ github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ
github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
-github.com/klauspost/compress v1.10.11 h1:K9z59aO18Aywg2b/WSgBaUX99mHy2BES18Cr5lBKZHk=
-github.com/klauspost/compress v1.10.11/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
-github.com/klauspost/pgzip v1.2.4 h1:TQ7CNpYKovDOmqzRHKxJh0BeaBI7UdQZYc6p7pMQh1A=
-github.com/klauspost/pgzip v1.2.4/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
+github.com/klauspost/compress v1.11.0 h1:wJbzvpYMVGG9iTI9VxpnNZfd4DzMPoCWze3GgSqz8yg=
+github.com/klauspost/compress v1.11.0/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
+github.com/klauspost/pgzip v1.2.5 h1:qnWYvvKqedOF2ulHpMG72XQol4ILEJ8k2wwRl/Km8oE=
+github.com/klauspost/pgzip v1.2.5/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/konsorten/go-windows-terminal-sequences v1.0.3 h1:CE8S1cTafDpPvMhIxNJKvHsGVBgn1xWYf1NbHQhywc8=
github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
diff --git a/vendor/github.com/containers/storage/layers.go b/vendor/github.com/containers/storage/layers.go
index dc21f75fd..52577299c 100644
--- a/vendor/github.com/containers/storage/layers.go
+++ b/vendor/github.com/containers/storage/layers.go
@@ -1342,6 +1342,7 @@ func (r *layerStore) ApplyDiff(to string, diff io.Reader) (size int64, err error
if err != nil {
return -1, err
}
+ defer idLogger.Close()
payload, err := asm.NewInputTarStream(io.TeeReader(uncompressed, io.MultiWriter(uncompressedCounter, idLogger)), metadata, storage.NewDiscardFilePutter())
if err != nil {
return -1, err
@@ -1356,7 +1357,6 @@ func (r *layerStore) ApplyDiff(to string, diff io.Reader) (size int64, err error
return -1, err
}
compressor.Close()
- idLogger.Close()
if err == nil {
if err := os.MkdirAll(filepath.Dir(r.tspath(layer.ID)), 0700); err != nil {
return -1, err
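
Replacing the single success-path Close with a defer placed right after the error check guarantees the digest logger is closed on every return, including the early error returns between creation and use. The general pattern:

    package main

    import (
    	"fmt"
    	"os"
    )

    func process(path string) error {
    	f, err := os.Open(path)
    	if err != nil {
    		return err
    	}
    	// Deferred immediately after a successful acquire: all later
    	// returns, early errors included, release the resource.
    	defer f.Close()

    	buf := make([]byte, 512)
    	if _, err := f.Read(buf); err != nil {
    		return err // f is still closed by the defer
    	}
    	return nil
    }

    func main() { fmt.Println(process("/etc/hostname")) }
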
diff --git a/vendor/github.com/containers/storage/pkg/mount/mount.go b/vendor/github.com/containers/storage/pkg/mount/mount.go
index 4b888dceb..8273ab5a9 100644
--- a/vendor/github.com/containers/storage/pkg/mount/mount.go
+++ b/vendor/github.com/containers/storage/pkg/mount/mount.go
@@ -4,8 +4,6 @@ import (
"sort"
"strconv"
"strings"
-
- "github.com/containers/storage/pkg/fileutils"
)
// mountError holds an error from a mount or unmount operation
@@ -43,33 +41,6 @@ func (e *mountError) Cause() error {
return e.err
}
-// GetMounts retrieves a list of mounts for the current running process.
-func GetMounts() ([]*Info, error) {
- return parseMountTable()
-}
-
-// Mounted determines if a specified mountpoint has been mounted.
-// On Linux it looks at /proc/self/mountinfo and on Solaris at mnttab.
-func Mounted(mountpoint string) (bool, error) {
- entries, err := parseMountTable()
- if err != nil {
- return false, err
- }
-
- mountpoint, err = fileutils.ReadSymlinkedPath(mountpoint)
- if err != nil {
- return false, err
- }
-
- // Search the table for the mountpoint
- for _, e := range entries {
- if e.Mountpoint == mountpoint {
- return true, nil
- }
- }
- return false, nil
-}
-
// Mount will mount filesystem according to the specified configuration, on the
// condition that the target path is *not* already mounted. Options must be
// specified like the mount or fstab unix commands: "opt1=val1,opt2=val2". See
diff --git a/vendor/github.com/containers/storage/pkg/mount/mountinfo.go b/vendor/github.com/containers/storage/pkg/mount/mountinfo.go
index e3fc3535e..efc6c406e 100644
--- a/vendor/github.com/containers/storage/pkg/mount/mountinfo.go
+++ b/vendor/github.com/containers/storage/pkg/mount/mountinfo.go
@@ -1,40 +1,21 @@
package mount
-// Info reveals information about a particular mounted filesystem. This
-// struct is populated from the content in the /proc/<pid>/mountinfo file.
-type Info struct {
- // ID is a unique identifier of the mount (may be reused after umount).
- ID int
+import (
+ "github.com/containers/storage/pkg/fileutils"
+ "github.com/moby/sys/mountinfo"
+)
- // Parent indicates the ID of the mount parent (or of self for the top of the
- // mount tree).
- Parent int
+type Info = mountinfo.Info
- // Major indicates one half of the device ID which identifies the device class.
- Major int
-
- // Minor indicates one half of the device ID which identifies a specific
- // instance of device.
- Minor int
-
- // Root of the mount within the filesystem.
- Root string
-
- // Mountpoint indicates the mount point relative to the process's root.
- Mountpoint string
-
- // Opts represents mount-specific options.
- Opts string
-
- // Optional represents optional fields.
- Optional string
-
- // Fstype indicates the type of filesystem, such as EXT3.
- Fstype string
-
- // Source indicates filesystem specific information or "none".
- Source string
+func GetMounts() ([]*Info, error) {
+ return mountinfo.GetMounts(nil)
+}
- // VfsOpts represents per super block options.
- VfsOpts string
+// Mounted determines if a specified mountpoint has been mounted.
+func Mounted(mountpoint string) (bool, error) {
+ mountpoint, err := fileutils.ReadSymlinkedPath(mountpoint)
+ if err != nil {
+ return false, err
+ }
+ return mountinfo.Mounted(mountpoint)
}
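
Because Info is now an alias of mountinfo.Info, containers/storage callers keep their field names (Mountpoint, Fstype, and so on in mountinfo v0.1.3) while the parsing moves to the shared library. A usage sketch against the wrapper above:

    package main

    import (
    	"fmt"
    	"log"

    	"github.com/containers/storage/pkg/mount"
    )

    func main() {
    	mounted, err := mount.Mounted("/proc") // resolves symlinks, then defers to mountinfo
    	if err != nil {
    		log.Fatal(err)
    	}
    	fmt.Println("/proc mounted:", mounted)

    	infos, err := mount.GetMounts() // []*mount.Info, i.e. []*mountinfo.Info
    	if err != nil {
    		log.Fatal(err)
    	}
    	for _, m := range infos {
    		fmt.Println(m.Mountpoint, m.Fstype)
    	}
    }
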
diff --git a/vendor/github.com/containers/storage/pkg/mount/mountinfo_linux.go b/vendor/github.com/containers/storage/pkg/mount/mountinfo_linux.go
index 19556d06b..cbc0299fb 100644
--- a/vendor/github.com/containers/storage/pkg/mount/mountinfo_linux.go
+++ b/vendor/github.com/containers/storage/pkg/mount/mountinfo_linux.go
@@ -1,120 +1,5 @@
package mount
-import (
- "bufio"
- "fmt"
- "io"
- "os"
- "strconv"
- "strings"
+import "github.com/moby/sys/mountinfo"
- "github.com/pkg/errors"
-)
-
-// Parse /proc/self/mountinfo because comparing Dev and ino does not work from
-// bind mounts
-func parseMountTable() ([]*Info, error) {
- f, err := os.Open("/proc/self/mountinfo")
- if err != nil {
- return nil, err
- }
- defer f.Close()
-
- return parseInfoFile(f)
-}
-
-func parseInfoFile(r io.Reader) ([]*Info, error) {
- s := bufio.NewScanner(r)
- out := []*Info{}
-
- for s.Scan() {
- /*
- 36 35 98:0 /mnt1 /mnt2 rw,noatime master:1 - ext3 /dev/root rw,errors=continue
- (0)(1)(2) (3) (4) (5) (6) (7) (8) (9) (10)
-
- (0) mount ID: unique identifier of the mount (may be reused after umount)
- (1) parent ID: ID of parent (or of self for the top of the mount tree)
- (2) major:minor: value of st_dev for files on filesystem
- (3) root: root of the mount within the filesystem
- (4) mount point: mount point relative to the process's root
- (5) mount options: per mount options
- (6) optional fields: zero or more fields of the form "tag[:value]"
- (7) separator: marks the end of the optional fields
- (8) filesystem type: name of filesystem of the form "type[.subtype]"
- (9) mount source: filesystem specific information or "none"
- (10) super options: per super block options
- */
- text := s.Text()
- fields := strings.Split(text, " ")
- numFields := len(fields)
- if numFields < 10 {
- // should be at least 10 fields
- return nil, errors.Errorf("Parsing %q failed: not enough fields (%d)", text, numFields)
- }
-
- p := &Info{}
- // ignore any number parsing errors, there should not be any
- p.ID, _ = strconv.Atoi(fields[0])
- p.Parent, _ = strconv.Atoi(fields[1])
- mm := strings.Split(fields[2], ":")
- if len(mm) != 2 {
- return nil, fmt.Errorf("Parsing %q failed: unexpected minor:major pair %s", text, mm)
- }
- p.Major, _ = strconv.Atoi(mm[0])
- p.Minor, _ = strconv.Atoi(mm[1])
- p.Root = fields[3]
- p.Mountpoint = fields[4]
- p.Opts = fields[5]
-
- // one or more optional fields, when a separator (-)
- i := 6
- for ; i < numFields && fields[i] != "-"; i++ {
- switch i {
- case 6:
- p.Optional = string(fields[6])
- default:
- /* NOTE there might be more optional fields before the separator,
- such as fields[7] or fields[8], although as of Linux kernel 5.5
- the only known ones are mount propagation flags in fields[6].
- The correct behavior is to ignore any unknown optional fields.
- */
- }
- }
- if i == numFields {
- return nil, fmt.Errorf("Parsing %q failed: missing - separator", text)
- }
-
- // There should be 3 fields after the separator...
- if i+4 > numFields {
- return nil, fmt.Errorf("Parsing %q failed: not enough fields after a - separator", text)
- }
- // ... but in Linux <= 3.9 mounting a cifs with spaces in a share name
- // (like "//serv/My Documents") _may_ end up having a space in the last field
- // of mountinfo (like "unc=//serv/My Documents"). Since kernel 3.10-rc1, cifs
- // option unc= is ignored, so a space should not appear. In here we ignore
- // those "extra" fields caused by extra spaces.
- p.Fstype = fields[i+1]
- p.Source = fields[i+2]
- p.VfsOpts = fields[i+3]
-
- out = append(out, p)
- }
- if err := s.Err(); err != nil {
- return nil, err
- }
-
- return out, nil
-}
-
-// PidMountInfo collects the mounts for a specific process ID. If the process
-// ID is unknown, it is better to use `GetMounts` which will inspect
-// "/proc/self/mountinfo" instead.
-func PidMountInfo(pid int) ([]*Info, error) {
- f, err := os.Open(fmt.Sprintf("/proc/%d/mountinfo", pid))
- if err != nil {
- return nil, err
- }
- defer f.Close()
-
- return parseInfoFile(f)
-}
+var PidMountInfo = mountinfo.PidMountInfo
diff --git a/vendor/github.com/containers/storage/pkg/mount/mountinfo_unsupported.go b/vendor/github.com/containers/storage/pkg/mount/mountinfo_unsupported.go
deleted file mode 100644
index 6cde1ed77..000000000
--- a/vendor/github.com/containers/storage/pkg/mount/mountinfo_unsupported.go
+++ /dev/null
@@ -1,12 +0,0 @@
-// +build !linux
-
-package mount
-
-import (
- "fmt"
- "runtime"
-)
-
-func parseMountTable() ([]*Info, error) {
- return nil, fmt.Errorf("mount.parseMountTable is not implemented on %s/%s", runtime.GOOS, runtime.GOARCH)
-}
diff --git a/vendor/github.com/containers/storage/utils.go b/vendor/github.com/containers/storage/utils.go
index d65d52718..b22263fe4 100644
--- a/vendor/github.com/containers/storage/utils.go
+++ b/vendor/github.com/containers/storage/utils.go
@@ -198,7 +198,7 @@ func getRootlessDirInfo(rootlessUID int) (string, string, error) {
}
// getRootlessStorageOpts returns the storage opts for containers running as non root
-func getRootlessStorageOpts(rootlessUID int) (StoreOptions, error) {
+func getRootlessStorageOpts(rootlessUID int, systemOpts StoreOptions) (StoreOptions, error) {
var opts StoreOptions
dataDir, rootlessRuntime, err := getRootlessDirInfo(rootlessUID)
@@ -207,10 +207,20 @@ func getRootlessStorageOpts(rootlessUID int) (StoreOptions, error) {
}
opts.RunRoot = rootlessRuntime
opts.GraphRoot = filepath.Join(dataDir, "containers", "storage")
- opts.RootlessStoragePath = opts.GraphRoot
+ if systemOpts.RootlessStoragePath != "" {
+ opts.RootlessStoragePath = systemOpts.RootlessStoragePath
+ } else {
+ opts.RootlessStoragePath = opts.GraphRoot
+ }
if path, err := exec.LookPath("fuse-overlayfs"); err == nil {
opts.GraphDriverName = "overlay"
opts.GraphDriverOptions = []string{fmt.Sprintf("overlay.mount_program=%s", path)}
+ for _, o := range systemOpts.GraphDriverOptions {
+ if strings.Contains(o, "ignore_chown_errors") {
+ opts.GraphDriverOptions = append(opts.GraphDriverOptions, o)
+ break
+ }
+ }
} else {
opts.GraphDriverName = "vfs"
}
@@ -242,7 +252,7 @@ func defaultStoreOptionsIsolated(rootless bool, rootlessUID int, storageConf str
)
storageOpts := defaultStoreOptions
if rootless && rootlessUID != 0 {
- storageOpts, err = getRootlessStorageOpts(rootlessUID)
+ storageOpts, err = getRootlessStorageOpts(rootlessUID, storageOpts)
if err != nil {
return storageOpts, err
}
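
Two behavior changes land here: a rootless storage path set in the system configuration now wins over the computed graph root, and an ignore_chown_errors graph-driver option is carried over into the rootless overlay options. The filtering step in isolation:

    package main

    import (
    	"fmt"
    	"strings"
    )

    // keepIgnoreChown copies only the ignore_chown_errors option from the
    // system-wide driver options into the rootless set, as above.
    func keepIgnoreChown(rootless, system []string) []string {
    	for _, o := range system {
    		if strings.Contains(o, "ignore_chown_errors") {
    			rootless = append(rootless, o)
    			break
    		}
    	}
    	return rootless
    }

    func main() {
    	system := []string{"overlay.mountopt=nodev", "overlay.ignore_chown_errors=true"}
    	rootless := []string{"overlay.mount_program=/usr/bin/fuse-overlayfs"}
    	fmt.Println(keepIgnoreChown(rootless, system))
    }
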
diff --git a/vendor/github.com/gorilla/mux/mux.go b/vendor/github.com/gorilla/mux/mux.go
index c9ba64707..782a34b22 100644
--- a/vendor/github.com/gorilla/mux/mux.go
+++ b/vendor/github.com/gorilla/mux/mux.go
@@ -435,8 +435,7 @@ func Vars(r *http.Request) map[string]string {
// CurrentRoute returns the matched route for the current request, if any.
// This only works when called inside the handler of the matched route
// because the matched route is stored in the request context which is cleared
-// after the handler returns, unless the KeepContext option is set on the
-// Router.
+// after the handler returns.
func CurrentRoute(r *http.Request) *Route {
if rv := r.Context().Value(routeKey); rv != nil {
return rv.(*Route)
diff --git a/vendor/github.com/gorilla/mux/regexp.go b/vendor/github.com/gorilla/mux/regexp.go
index 96dd94ad1..0144842bb 100644
--- a/vendor/github.com/gorilla/mux/regexp.go
+++ b/vendor/github.com/gorilla/mux/regexp.go
@@ -325,6 +325,12 @@ func (v routeRegexpGroup) setMatch(req *http.Request, m *RouteMatch, r *Route) {
// Store host variables.
if v.host != nil {
host := getHost(req)
+ if v.host.wildcardHostPort {
+ // Don't be strict on the port match
+ if i := strings.Index(host, ":"); i != -1 {
+ host = host[:i]
+ }
+ }
matches := v.host.regexp.FindStringSubmatchIndex(host)
if len(matches) > 0 {
extractVars(host, matches, v.host.varsN, m.Vars)
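
With wildcardHostPort set (a host template that does not pin a port), the matcher now drops the :port suffix before running the host regexp, so a port in the request no longer defeats the match. A hedged sketch of the effect:

    package main

    import (
    	"fmt"
    	"log"
    	"net/http"

    	"github.com/gorilla/mux"
    )

    func main() {
    	r := mux.NewRouter()
    	// No port in the template, so with the change above a request for
    	// api.example.com:8080 still matches and {sub} extracts "api".
    	r.Host("{sub}.example.com").HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
    		fmt.Fprintln(w, "sub =", mux.Vars(req)["sub"])
    	})
    	log.Fatal(http.ListenAndServe(":8080", r))
    }
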
diff --git a/vendor/github.com/imdario/mergo/README.md b/vendor/github.com/imdario/mergo/README.md
index 02fc81e06..876abb500 100644
--- a/vendor/github.com/imdario/mergo/README.md
+++ b/vendor/github.com/imdario/mergo/README.md
@@ -1,44 +1,54 @@
# Mergo
-A helper to merge structs and maps in Golang. Useful for configuration default values, avoiding messy if-statements.
-
-Also a lovely [comune](http://en.wikipedia.org/wiki/Mergo) (municipality) in the Province of Ancona in the Italian region of Marche.
-
-## Status
-
-It is ready for production use. [It is used in several projects by Docker, Google, The Linux Foundation, VMWare, Shopify, etc](https://github.com/imdario/mergo#mergo-in-the-wild).
[![GoDoc][3]][4]
-[![GoCard][5]][6]
+[![GitHub release][5]][6]
+[![GoCard][7]][8]
[![Build Status][1]][2]
-[![Coverage Status][7]][8]
-[![Sourcegraph][9]][10]
-[![FOSSA Status](https://app.fossa.io/api/projects/git%2Bgithub.com%2Fimdario%2Fmergo.svg?type=shield)](https://app.fossa.io/projects/git%2Bgithub.com%2Fimdario%2Fmergo?ref=badge_shield)
+[![Coverage Status][9]][10]
+[![Sourcegraph][11]][12]
+[![FOSSA Status][13]][14]
+
+[![GoCenter Kudos][15]][16]
[1]: https://travis-ci.org/imdario/mergo.png
[2]: https://travis-ci.org/imdario/mergo
[3]: https://godoc.org/github.com/imdario/mergo?status.svg
[4]: https://godoc.org/github.com/imdario/mergo
-[5]: https://goreportcard.com/badge/imdario/mergo
-[6]: https://goreportcard.com/report/github.com/imdario/mergo
-[7]: https://coveralls.io/repos/github/imdario/mergo/badge.svg?branch=master
-[8]: https://coveralls.io/github/imdario/mergo?branch=master
-[9]: https://sourcegraph.com/github.com/imdario/mergo/-/badge.svg
-[10]: https://sourcegraph.com/github.com/imdario/mergo?badge
+[5]: https://img.shields.io/github/release/imdario/mergo.svg
+[6]: https://github.com/imdario/mergo/releases
+[7]: https://goreportcard.com/badge/imdario/mergo
+[8]: https://goreportcard.com/report/github.com/imdario/mergo
+[9]: https://coveralls.io/repos/github/imdario/mergo/badge.svg?branch=master
+[10]: https://coveralls.io/github/imdario/mergo?branch=master
+[11]: https://sourcegraph.com/github.com/imdario/mergo/-/badge.svg
+[12]: https://sourcegraph.com/github.com/imdario/mergo?badge
+[13]: https://app.fossa.io/api/projects/git%2Bgithub.com%2Fimdario%2Fmergo.svg?type=shield
+[14]: https://app.fossa.io/projects/git%2Bgithub.com%2Fimdario%2Fmergo?ref=badge_shield
+[15]: https://search.gocenter.io/api/ui/badge/github.com%2Fimdario%2Fmergo
+[16]: https://search.gocenter.io/github.com/imdario/mergo
-### Latest release
+A helper to merge structs and maps in Golang. Useful for configuration default values, avoiding messy if-statements.
-[Release v0.3.7](https://github.com/imdario/mergo/releases/tag/v0.3.7).
+Mergo merges same-type structs and maps by setting default values in zero-value fields. Mergo won't merge unexported (private) fields. It will recursively merge any exported field. It also won't merge structs inside maps (because they are not addressable using Go reflection).
+
+Also a lovely [comune](http://en.wikipedia.org/wiki/Mergo) (municipality) in the Province of Ancona in the Italian region of Marche.
+
+## Status
+
+It is ready for production use. [It is used in several projects by Docker, Google, The Linux Foundation, VMWare, Shopify, etc](https://github.com/imdario/mergo#mergo-in-the-wild).
### Important note
-Please keep in mind that in [0.3.2](//github.com/imdario/mergo/releases/tag/0.3.2) Mergo changed `Merge()`and `Map()` signatures to support [transformers](#transformers). An optional/variadic argument has been added, so it won't break existing code.
+Please keep in mind that a problematic PR broke [0.3.9](//github.com/imdario/mergo/releases/tag/0.3.9). I reverted it in [0.3.10](//github.com/imdario/mergo/releases/tag/0.3.10), and I consider it stable but not bug-free. Also, this version adds support for Go modules.
-If you were using Mergo **before** April 6th 2015, please check your project works as intended after updating your local copy with ```go get -u github.com/imdario/mergo```. I apologize for any issue caused by its previous behavior and any future bug that Mergo could cause (I hope it won't!) in existing projects after the change (release 0.2.0).
+Keep in mind that in [0.3.2](//github.com/imdario/mergo/releases/tag/0.3.2), Mergo changed `Merge()` and `Map()` signatures to support [transformers](#transformers). I added an optional/variadic argument so that it won't break the existing code.
+
+If you were using Mergo before April 6th, 2015, please check your project works as intended after updating your local copy with ```go get -u github.com/imdario/mergo```. I apologize for any issue caused by its previous behavior and any future bug that Mergo could cause in existing projects after the change (release 0.2.0).
### Donations
-If Mergo is useful to you, consider buying me a coffee, a beer or making a monthly donation so I can keep building great free software. :heart_eyes:
+If Mergo is useful to you, consider buying me a coffee, a beer, or making a monthly donation to allow me to keep building great free software. :heart_eyes:
<a href='https://ko-fi.com/B0B58839' target='_blank'><img height='36' style='border:0px;height:36px;' src='https://az743702.vo.msecnd.net/cdn/kofi1.png?v=0' border='0' alt='Buy Me a Coffee at ko-fi.com' /></a>
[![Beerpay](https://beerpay.io/imdario/mergo/badge.svg)](https://beerpay.io/imdario/mergo)
@@ -87,8 +97,9 @@ If Mergo is useful to you, consider buying me a coffee, a beer or making a month
- [mantasmatelis/whooplist-server](https://github.com/mantasmatelis/whooplist-server)
- [jnuthong/item_search](https://github.com/jnuthong/item_search)
- [bukalapak/snowboard](https://github.com/bukalapak/snowboard)
+- [janoszen/containerssh](https://github.com/janoszen/containerssh)
-## Installation
+## Install
go get github.com/imdario/mergo
@@ -99,7 +110,7 @@ If Mergo is useful to you, consider buying me a coffee, a beer or making a month
## Usage
-You can only merge same-type structs with exported fields initialized as zero value of their type and same-types maps. Mergo won't merge unexported (private) fields but will do recursively any exported one. It won't merge empty structs value as [they are not considered zero values](https://golang.org/ref/spec#The_zero_value) either. Also maps will be merged recursively except for structs inside maps (because they are not addressable using Go reflection).
+You can only merge same-type structs with exported fields initialized as the zero value of their type, and same-type maps. Mergo won't merge unexported (private) fields but will recursively merge any exported field. It won't merge empty struct values, as [they are zero values](https://golang.org/ref/spec#The_zero_value) too. Also, maps will be merged recursively except for structs inside maps (because they are not addressable using Go reflection).
```go
if err := mergo.Merge(&dst, src); err != nil {
@@ -125,9 +136,7 @@ if err := mergo.Map(&dst, srcMap); err != nil {
Warning: if you map a struct to map, it won't do it recursively. Don't expect Mergo to map struct members of your struct as `map[string]interface{}`. They will be just assigned as values.
-More information and examples in [godoc documentation](http://godoc.org/github.com/imdario/mergo).
-
-### Nice example
+Here is a nice example:
```go
package main
@@ -175,10 +184,10 @@ import (
"time"
)
-type timeTransfomer struct {
+type timeTransformer struct {
}
-func (t timeTransfomer) Transformer(typ reflect.Type) func(dst, src reflect.Value) error {
+func (t timeTransformer) Transformer(typ reflect.Type) func(dst, src reflect.Value) error {
if typ == reflect.TypeOf(time.Time{}) {
return func(dst, src reflect.Value) error {
if dst.CanSet() {
@@ -202,7 +211,7 @@ type Snapshot struct {
func main() {
src := Snapshot{time.Now()}
dest := Snapshot{}
- mergo.Merge(&dest, src, mergo.WithTransformers(timeTransfomer{}))
+ mergo.Merge(&dest, src, mergo.WithTransformers(timeTransformer{}))
fmt.Println(dest)
// Will print
// { 2018-01-12 01:15:00 +0000 UTC m=+0.000000001 }
diff --git a/vendor/github.com/imdario/mergo/doc.go b/vendor/github.com/imdario/mergo/doc.go
index 6e9aa7baf..fcd985f99 100644
--- a/vendor/github.com/imdario/mergo/doc.go
+++ b/vendor/github.com/imdario/mergo/doc.go
@@ -4,41 +4,140 @@
// license that can be found in the LICENSE file.
/*
-Package mergo merges same-type structs and maps by setting default values in zero-value fields.
+A helper to merge structs and maps in Golang. Useful for configuration default values, avoiding messy if-statements.
-Mergo won't merge unexported (private) fields but will do recursively any exported one. It also won't merge structs inside maps (because they are not addressable using Go reflection).
+Mergo merges same-type structs and maps by setting default values in zero-value fields. Mergo won't merge unexported (private) fields. It will recursively merge any exported field. It also won't merge structs inside maps (because they are not addressable using Go reflection).
+
+Status
+
+It is ready for production use. It is used in several projects by Docker, Google, The Linux Foundation, VMWare, Shopify, etc.
+
+Important note
+
+Please keep in mind that a problematic PR broke 0.3.9. We reverted it in 0.3.10. We consider 0.3.10 stable but not bug-free. Also, this version adds support for Go modules.
+
+Keep in mind that in 0.3.2, Mergo changed Merge() and Map() signatures to support transformers. We added an optional/variadic argument so that it won't break the existing code.
+
+If you were using Mergo before April 6th, 2015, please check your project works as intended after updating your local copy with go get -u github.com/imdario/mergo. I apologize for any issue caused by its previous behavior and any future bug that Mergo could cause in existing projects after the change (release 0.2.0).
+
+Install
+
+Do your usual installation procedure:
+
+ go get github.com/imdario/mergo
+
+ // use in your .go code
+ import (
+ "github.com/imdario/mergo"
+ )
Usage
-From my own work-in-progress project:
+You can only merge same-type structs with exported fields initialized as the zero value of their type, and same-type maps. Mergo won't merge unexported (private) fields but will recursively merge any exported field. It won't merge empty struct values, as they are zero values too. Also, maps will be merged recursively except for structs inside maps (because they are not addressable using Go reflection).
+
+ if err := mergo.Merge(&dst, src); err != nil {
+ // ...
+ }
+
+Also, you can merge overwriting values using the transformer WithOverride.
+
+ if err := mergo.Merge(&dst, src, mergo.WithOverride); err != nil {
+ // ...
+ }
+
+Additionally, you can map a map[string]interface{} to a struct (and otherwise, from struct to map), following the same restrictions as in Merge(). Keys are capitalized to find each corresponding exported field.
+
+ if err := mergo.Map(&dst, srcMap); err != nil {
+ // ...
+ }
+
+Warning: if you map a struct to map, it won't do it recursively. Don't expect Mergo to map struct members of your struct as map[string]interface{}. They will be just assigned as values.
+
+Here is a nice example:
+
+ package main
+
+ import (
+ "fmt"
+ "github.com/imdario/mergo"
+ )
- type networkConfig struct {
- Protocol string
- Address string
- ServerType string `json: "server_type"`
- Port uint16
+ type Foo struct {
+ A string
+ B int64
}
- type FssnConfig struct {
- Network networkConfig
+ func main() {
+ src := Foo{
+ A: "one",
+ B: 2,
+ }
+ dest := Foo{
+ A: "two",
+ }
+ mergo.Merge(&dest, src)
+ fmt.Println(dest)
+ // Will print
+ // {two 2}
}
- var fssnDefault = FssnConfig {
- networkConfig {
- "tcp",
- "127.0.0.1",
- "http",
- 31560,
- },
+Transformers
+
+Transformers allow you to merge specific types differently than in the default behavior. In other words, you can customize how some types are merged. For example, time.Time is a struct; it doesn't have a simple zero value, but IsZero can return true because it has fields with zero values. How can we merge a non-zero time.Time?
+
+ package main
+
+ import (
+ "fmt"
+ "github.com/imdario/mergo"
+ "reflect"
+ "time"
+ )
+
+ type timeTransformer struct {
}
- // Inside a function [...]
+ func (t timeTransformer) Transformer(typ reflect.Type) func(dst, src reflect.Value) error {
+ if typ == reflect.TypeOf(time.Time{}) {
+ return func(dst, src reflect.Value) error {
+ if dst.CanSet() {
+ isZero := dst.MethodByName("IsZero")
+ result := isZero.Call([]reflect.Value{})
+ if result[0].Bool() {
+ dst.Set(src)
+ }
+ }
+ return nil
+ }
+ }
+ return nil
+ }
+
+ type Snapshot struct {
+ Time time.Time
+ // ...
+ }
- if err := mergo.Merge(&config, fssnDefault); err != nil {
- log.Fatal(err)
+ func main() {
+ src := Snapshot{time.Now()}
+ dest := Snapshot{}
+ mergo.Merge(&dest, src, mergo.WithTransformers(timeTransformer{}))
+ fmt.Println(dest)
+ // Will print
+ // { 2018-01-12 01:15:00 +0000 UTC m=+0.000000001 }
}
- // More code [...]
+Contact me
+
+If I can help you, if you have an idea, or if you are using Mergo in your projects, don't hesitate to drop me a line (or a pull request): https://twitter.com/im_dario
+
+About
+
+Written by Dario Castañé: https://da.rio.hn
+
+License
+
+BSD 3-Clause license, like the Go language.
*/
package mergo
diff --git a/vendor/github.com/imdario/mergo/go.mod b/vendor/github.com/imdario/mergo/go.mod
new file mode 100644
index 000000000..3d689d93e
--- /dev/null
+++ b/vendor/github.com/imdario/mergo/go.mod
@@ -0,0 +1,5 @@
+module github.com/imdario/mergo
+
+go 1.13
+
+require gopkg.in/yaml.v2 v2.3.0
diff --git a/vendor/github.com/imdario/mergo/go.sum b/vendor/github.com/imdario/mergo/go.sum
new file mode 100644
index 000000000..168980da5
--- /dev/null
+++ b/vendor/github.com/imdario/mergo/go.sum
@@ -0,0 +1,4 @@
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU=
+gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
diff --git a/vendor/github.com/imdario/mergo/map.go b/vendor/github.com/imdario/mergo/map.go
index d83258b4d..a13a7ee46 100644
--- a/vendor/github.com/imdario/mergo/map.go
+++ b/vendor/github.com/imdario/mergo/map.go
@@ -99,11 +99,11 @@ func deepMap(dst, src reflect.Value, visited map[uintptr]*visit, depth int, conf
continue
}
if srcKind == dstKind {
- if _, err = deepMerge(dstElement, srcElement, visited, depth+1, config); err != nil {
+ if err = deepMerge(dstElement, srcElement, visited, depth+1, config); err != nil {
return
}
} else if dstKind == reflect.Interface && dstElement.Kind() == reflect.Interface {
- if _, err = deepMerge(dstElement, srcElement, visited, depth+1, config); err != nil {
+ if err = deepMerge(dstElement, srcElement, visited, depth+1, config); err != nil {
return
}
} else if srcKind == reflect.Map {
@@ -141,6 +141,9 @@ func MapWithOverwrite(dst, src interface{}, opts ...func(*Config)) error {
}
func _map(dst, src interface{}, opts ...func(*Config)) error {
+ if dst != nil && reflect.ValueOf(dst).Kind() != reflect.Ptr {
+ return ErrNonPointerAgument
+ }
var (
vDst, vSrc reflect.Value
err error
@@ -157,8 +160,7 @@ func _map(dst, src interface{}, opts ...func(*Config)) error {
// To be friction-less, we redirect equal-type arguments
// to deepMerge. Only because arguments can be anything.
if vSrc.Kind() == vDst.Kind() {
- _, err := deepMerge(vDst, vSrc, make(map[uintptr]*visit), 0, config)
- return err
+ return deepMerge(vDst, vSrc, make(map[uintptr]*visit), 0, config)
}
switch vSrc.Kind() {
case reflect.Struct:
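
The guard added to _map (and mirrored in merge below) rejects a non-pointer destination immediately with ErrNonPointerAgument (the misspelling is the exported upstream name) instead of failing later inside reflection. From the caller's side:

    package main

    import (
    	"fmt"

    	"github.com/imdario/mergo"
    )

    type Config struct{ Host string }

    func main() {
    	src := Config{Host: "localhost"}
    	var dst Config

    	// Passing dst by value is now a well-typed, immediate error.
    	if err := mergo.Merge(dst, src); err != nil {
    		fmt.Println(err) // dst must be a pointer
    	}
    	// Passing a pointer merges as before.
    	if err := mergo.Merge(&dst, src); err != nil {
    		fmt.Println(err)
    	}
    	fmt.Println(dst.Host) // localhost
    }
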
diff --git a/vendor/github.com/imdario/mergo/merge.go b/vendor/github.com/imdario/mergo/merge.go
index 3332c9c2a..afa84a1e2 100644
--- a/vendor/github.com/imdario/mergo/merge.go
+++ b/vendor/github.com/imdario/mergo/merge.go
@@ -11,26 +11,26 @@ package mergo
import (
"fmt"
"reflect"
- "unsafe"
)
-func hasExportedField(dst reflect.Value) (exported bool) {
+func hasMergeableFields(dst reflect.Value) (exported bool) {
for i, n := 0, dst.NumField(); i < n; i++ {
field := dst.Type().Field(i)
- if isExportedComponent(&field) {
- return true
+ if field.Anonymous && dst.Field(i).Kind() == reflect.Struct {
+ exported = exported || hasMergeableFields(dst.Field(i))
+ } else if isExportedComponent(&field) {
+ exported = exported || len(field.PkgPath) == 0
}
}
return
}
func isExportedComponent(field *reflect.StructField) bool {
- name := field.Name
pkgPath := field.PkgPath
if len(pkgPath) > 0 {
return false
}
- c := name[0]
+ c := field.Name[0]
if 'a' <= c && c <= 'z' || c == '_' {
return false
}
@@ -44,6 +44,8 @@ type Config struct {
Transformers Transformers
overwriteWithEmptyValue bool
overwriteSliceWithEmptyValue bool
+ sliceDeepCopy bool
+ debug bool
}
type Transformers interface {
@@ -53,17 +55,16 @@ type Transformers interface {
// Traverses recursively both values, assigning src's fields values to dst.
// The map argument tracks comparisons that have already been seen, which allows
// short circuiting on recursive types.
-func deepMerge(dstIn, src reflect.Value, visited map[uintptr]*visit, depth int, config *Config) (dst reflect.Value, err error) {
- dst = dstIn
+func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, config *Config) (err error) {
overwrite := config.Overwrite
typeCheck := config.TypeCheck
overwriteWithEmptySrc := config.overwriteWithEmptyValue
overwriteSliceWithEmptySrc := config.overwriteSliceWithEmptyValue
+ sliceDeepCopy := config.sliceDeepCopy
if !src.IsValid() {
return
}
-
if dst.CanAddr() {
addr := dst.UnsafeAddr()
h := 17 * addr
@@ -71,7 +72,7 @@ func deepMerge(dstIn, src reflect.Value, visited map[uintptr]*visit, depth int,
typ := dst.Type()
for p := seen; p != nil; p = p.next {
if p.ptr == addr && p.typ == typ {
- return dst, nil
+ return nil
}
}
// Remember, remember...
@@ -85,126 +86,154 @@ func deepMerge(dstIn, src reflect.Value, visited map[uintptr]*visit, depth int,
}
}
- if dst.IsValid() && src.IsValid() && src.Type() != dst.Type() {
- err = fmt.Errorf("cannot append two different types (%s, %s)", src.Kind(), dst.Kind())
- return
- }
-
switch dst.Kind() {
case reflect.Struct:
- if hasExportedField(dst) {
- dstCp := reflect.New(dst.Type()).Elem()
+ if hasMergeableFields(dst) {
for i, n := 0, dst.NumField(); i < n; i++ {
- dstField := dst.Field(i)
- structField := dst.Type().Field(i)
- // copy un-exported struct fields
- if !isExportedComponent(&structField) {
- rf := dstCp.Field(i)
- rf = reflect.NewAt(rf.Type(), unsafe.Pointer(rf.UnsafeAddr())).Elem() //nolint:gosec
- dstRF := dst.Field(i)
- if !dst.Field(i).CanAddr() {
- continue
- }
-
- dstRF = reflect.NewAt(dstRF.Type(), unsafe.Pointer(dstRF.UnsafeAddr())).Elem() //nolint:gosec
- rf.Set(dstRF)
- continue
- }
- dstField, err = deepMerge(dstField, src.Field(i), visited, depth+1, config)
- if err != nil {
+ if err = deepMerge(dst.Field(i), src.Field(i), visited, depth+1, config); err != nil {
return
}
- dstCp.Field(i).Set(dstField)
}
-
- if dst.CanSet() {
- dst.Set(dstCp)
- } else {
- dst = dstCp
- }
- return
} else {
if (isReflectNil(dst) || overwrite) && (!isEmptyValue(src) || overwriteWithEmptySrc) {
- dst = src
+ dst.Set(src)
}
}
-
case reflect.Map:
if dst.IsNil() && !src.IsNil() {
- if dst.CanSet() {
- dst.Set(reflect.MakeMap(dst.Type()))
- } else {
- dst = src
- return
+ dst.Set(reflect.MakeMap(dst.Type()))
+ }
+
+ if src.Kind() != reflect.Map {
+ if overwrite {
+ dst.Set(src)
}
+ return
}
+
for _, key := range src.MapKeys() {
srcElement := src.MapIndex(key)
- dstElement := dst.MapIndex(key)
if !srcElement.IsValid() {
continue
}
- if dst.MapIndex(key).IsValid() {
- k := dstElement.Interface()
- dstElement = reflect.ValueOf(k)
- }
- if isReflectNil(srcElement) {
- if overwrite || isReflectNil(dstElement) {
- dst.SetMapIndex(key, srcElement)
+ dstElement := dst.MapIndex(key)
+ switch srcElement.Kind() {
+ case reflect.Chan, reflect.Func, reflect.Map, reflect.Interface, reflect.Slice:
+ if srcElement.IsNil() {
+ if overwrite {
+ dst.SetMapIndex(key, srcElement)
+ }
+ continue
+ }
+ fallthrough
+ default:
+ if !srcElement.CanInterface() {
+ continue
+ }
+ switch reflect.TypeOf(srcElement.Interface()).Kind() {
+ case reflect.Struct:
+ fallthrough
+ case reflect.Ptr:
+ fallthrough
+ case reflect.Map:
+ srcMapElm := srcElement
+ dstMapElm := dstElement
+ if srcMapElm.CanInterface() {
+ srcMapElm = reflect.ValueOf(srcMapElm.Interface())
+ if dstMapElm.IsValid() {
+ dstMapElm = reflect.ValueOf(dstMapElm.Interface())
+ }
+ }
+ if err = deepMerge(dstMapElm, srcMapElm, visited, depth+1, config); err != nil {
+ return
+ }
+ case reflect.Slice:
+ srcSlice := reflect.ValueOf(srcElement.Interface())
+
+ var dstSlice reflect.Value
+ if !dstElement.IsValid() || dstElement.IsNil() {
+ dstSlice = reflect.MakeSlice(srcSlice.Type(), 0, srcSlice.Len())
+ } else {
+ dstSlice = reflect.ValueOf(dstElement.Interface())
+ }
+
+ if (!isEmptyValue(src) || overwriteWithEmptySrc || overwriteSliceWithEmptySrc) && (overwrite || isEmptyValue(dst)) && !config.AppendSlice && !sliceDeepCopy {
+ if typeCheck && srcSlice.Type() != dstSlice.Type() {
+ return fmt.Errorf("cannot override two slices with different type (%s, %s)", srcSlice.Type(), dstSlice.Type())
+ }
+ dstSlice = srcSlice
+ } else if config.AppendSlice {
+ if srcSlice.Type() != dstSlice.Type() {
+ return fmt.Errorf("cannot append two slices with different type (%s, %s)", srcSlice.Type(), dstSlice.Type())
+ }
+ dstSlice = reflect.AppendSlice(dstSlice, srcSlice)
+ } else if sliceDeepCopy {
+ i := 0
+ for ; i < srcSlice.Len() && i < dstSlice.Len(); i++ {
+ srcElement := srcSlice.Index(i)
+ dstElement := dstSlice.Index(i)
+
+ if srcElement.CanInterface() {
+ srcElement = reflect.ValueOf(srcElement.Interface())
+ }
+ if dstElement.CanInterface() {
+ dstElement = reflect.ValueOf(dstElement.Interface())
+ }
+
+ if err = deepMerge(dstElement, srcElement, visited, depth+1, config); err != nil {
+ return
+ }
+ }
+
+ }
+ dst.SetMapIndex(key, dstSlice)
}
- continue
}
- if !srcElement.CanInterface() {
+ if dstElement.IsValid() && !isEmptyValue(dstElement) && (reflect.TypeOf(srcElement.Interface()).Kind() == reflect.Map || reflect.TypeOf(srcElement.Interface()).Kind() == reflect.Slice) {
continue
}
- if srcElement.CanInterface() {
- srcElement = reflect.ValueOf(srcElement.Interface())
- if dstElement.IsValid() {
- dstElement = reflect.ValueOf(dstElement.Interface())
+ if srcElement.IsValid() && ((srcElement.Kind() != reflect.Ptr && overwrite) || !dstElement.IsValid() || isEmptyValue(dstElement)) {
+ if dst.IsNil() {
+ dst.Set(reflect.MakeMap(dst.Type()))
}
+ dst.SetMapIndex(key, srcElement)
}
- dstElement, err = deepMerge(dstElement, srcElement, visited, depth+1, config)
- if err != nil {
- return
- }
- dst.SetMapIndex(key, dstElement)
-
}
case reflect.Slice:
- newSlice := dst
- if (!isEmptyValue(src) || overwriteWithEmptySrc || overwriteSliceWithEmptySrc) && (overwrite || isEmptyValue(dst)) && !config.AppendSlice {
- if typeCheck && src.Type() != dst.Type() {
- return dst, fmt.Errorf("cannot override two slices with different type (%s, %s)", src.Type(), dst.Type())
- }
- newSlice = src
- } else if config.AppendSlice {
- if typeCheck && src.Type() != dst.Type() {
- err = fmt.Errorf("cannot append two slice with different type (%s, %s)", src.Type(), dst.Type())
- return
- }
- newSlice = reflect.AppendSlice(dst, src)
- }
- if dst.CanSet() {
- dst.Set(newSlice)
- } else {
- dst = newSlice
- }
- case reflect.Ptr, reflect.Interface:
- if isReflectNil(src) {
+ if !dst.CanSet() {
break
}
+ if (!isEmptyValue(src) || overwriteWithEmptySrc || overwriteSliceWithEmptySrc) && (overwrite || isEmptyValue(dst)) && !config.AppendSlice && !sliceDeepCopy {
+ dst.Set(src)
+ } else if config.AppendSlice {
+ if src.Type() != dst.Type() {
+				return fmt.Errorf("cannot append two slices with different type (%s, %s)", src.Type(), dst.Type())
+ }
+ dst.Set(reflect.AppendSlice(dst, src))
+ } else if sliceDeepCopy {
+ for i := 0; i < src.Len() && i < dst.Len(); i++ {
+ srcElement := src.Index(i)
+ dstElement := dst.Index(i)
+ if srcElement.CanInterface() {
+ srcElement = reflect.ValueOf(srcElement.Interface())
+ }
+ if dstElement.CanInterface() {
+ dstElement = reflect.ValueOf(dstElement.Interface())
+ }
- if dst.Kind() != reflect.Ptr && src.Type().AssignableTo(dst.Type()) {
- if dst.IsNil() || overwrite {
- if overwrite || isEmptyValue(dst) {
- if dst.CanSet() {
- dst.Set(src)
- } else {
- dst = src
- }
+ if err = deepMerge(dstElement, srcElement, visited, depth+1, config); err != nil {
+ return
}
}
+ }
+ case reflect.Ptr:
+ fallthrough
+ case reflect.Interface:
+ if isReflectNil(src) {
+ if overwriteWithEmptySrc && dst.CanSet() && src.Type().AssignableTo(dst.Type()) {
+ dst.Set(src)
+ }
break
}
@@ -214,33 +243,35 @@ func deepMerge(dstIn, src reflect.Value, visited map[uintptr]*visit, depth int,
dst.Set(src)
}
} else if src.Kind() == reflect.Ptr {
- if dst, err = deepMerge(dst.Elem(), src.Elem(), visited, depth+1, config); err != nil {
+ if err = deepMerge(dst.Elem(), src.Elem(), visited, depth+1, config); err != nil {
return
}
- dst = dst.Addr()
} else if dst.Elem().Type() == src.Type() {
- if dst, err = deepMerge(dst.Elem(), src, visited, depth+1, config); err != nil {
+ if err = deepMerge(dst.Elem(), src, visited, depth+1, config); err != nil {
return
}
} else {
- return dst, ErrDifferentArgumentsTypes
+ return ErrDifferentArgumentsTypes
}
break
}
+
if dst.IsNil() || overwrite {
- if (overwrite || isEmptyValue(dst)) && (overwriteWithEmptySrc || !isEmptyValue(src)) {
- if dst.CanSet() {
- dst.Set(src)
- } else {
- dst = src
- }
+ if dst.CanSet() && (overwrite || isEmptyValue(dst)) {
+ dst.Set(src)
}
- } else if _, err = deepMerge(dst.Elem(), src.Elem(), visited, depth+1, config); err != nil {
- return
+ break
+ }
+
+ if dst.Elem().Kind() == src.Elem().Kind() {
+ if err = deepMerge(dst.Elem(), src.Elem(), visited, depth+1, config); err != nil {
+ return
+ }
+ break
}
default:
- overwriteFull := (!isEmptyValue(src) || overwriteWithEmptySrc) && (overwrite || isEmptyValue(dst))
- if overwriteFull {
+ mustSet := (isEmptyValue(dst) || overwrite) && (!isEmptyValue(src) || overwriteWithEmptySrc)
+ if mustSet {
if dst.CanSet() {
dst.Set(src)
} else {
@@ -281,6 +312,7 @@ func WithOverride(config *Config) {
// WithOverwriteWithEmptyValue will make merge override non empty dst attributes with empty src attributes values.
func WithOverwriteWithEmptyValue(config *Config) {
+ config.Overwrite = true
config.overwriteWithEmptyValue = true
}
@@ -299,7 +331,16 @@ func WithTypeCheck(config *Config) {
config.TypeCheck = true
}
+// WithSliceDeepCopy will merge slice element one by one with Overwrite flag.
+func WithSliceDeepCopy(config *Config) {
+ config.sliceDeepCopy = true
+ config.Overwrite = true
+}
+
func merge(dst, src interface{}, opts ...func(*Config)) error {
+ if dst != nil && reflect.ValueOf(dst).Kind() != reflect.Ptr {
+ return ErrNonPointerAgument
+ }
var (
vDst, vSrc reflect.Value
err error
@@ -314,14 +355,10 @@ func merge(dst, src interface{}, opts ...func(*Config)) error {
if vDst, vSrc, err = resolveValues(dst, src); err != nil {
return err
}
- if !vDst.CanSet() {
- return fmt.Errorf("cannot set dst, needs reference")
- }
if vDst.Type() != vSrc.Type() {
return ErrDifferentArgumentsTypes
}
- _, err = deepMerge(vDst, vSrc, make(map[uintptr]*visit), 0, config)
- return err
+ return deepMerge(vDst, vSrc, make(map[uintptr]*visit), 0, config)
}
// IsReflectNil is the reflect value provided nil
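
The new WithSliceDeepCopy option merges slices element by element up to the shorter length and implies Overwrite, so non-empty source fields replace destination fields. A usage sketch:

    package main

    import (
    	"fmt"

    	"github.com/imdario/mergo"
    )

    type Item struct {
    	Name string
    	Qty  int
    }

    type Cart struct{ Items []Item }

    func main() {
    	dst := Cart{Items: []Item{{Name: "", Qty: 1}, {Name: "b", Qty: 0}}}
    	src := Cart{Items: []Item{{Name: "a", Qty: 9}, {Name: "B", Qty: 2}}}

    	// Instead of replacing or appending the whole slice, each element
    	// pair is merged; Overwrite is implied by the option.
    	if err := mergo.Merge(&dst, src, mergo.WithSliceDeepCopy); err != nil {
    		panic(err)
    	}
    	fmt.Printf("%+v\n", dst.Items)
    }
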
diff --git a/vendor/github.com/imdario/mergo/mergo.go b/vendor/github.com/imdario/mergo/mergo.go
index a82fea2fd..3cc926c7f 100644
--- a/vendor/github.com/imdario/mergo/mergo.go
+++ b/vendor/github.com/imdario/mergo/mergo.go
@@ -20,6 +20,7 @@ var (
ErrNotSupported = errors.New("only structs and maps are supported")
ErrExpectedMapAsDestination = errors.New("dst was expected to be a map")
ErrExpectedStructAsDestination = errors.New("dst was expected to be a struct")
+ ErrNonPointerAgument = errors.New("dst must be a pointer")
)
// During deepMerge, must keep track of checks that are
@@ -75,23 +76,3 @@ func resolveValues(dst, src interface{}) (vDst, vSrc reflect.Value, err error) {
}
return
}
-
-// Traverses recursively both values, assigning src's fields values to dst.
-// The map argument tracks comparisons that have already been seen, which allows
-// short circuiting on recursive types.
-func deeper(dst, src reflect.Value, visited map[uintptr]*visit, depth int) (err error) {
- if dst.CanAddr() {
- addr := dst.UnsafeAddr()
- h := 17 * addr
- seen := visited[h]
- typ := dst.Type()
- for p := seen; p != nil; p = p.next {
- if p.ptr == addr && p.typ == typ {
- return nil
- }
- }
- // Remember, remember...
- visited[h] = &visit{addr, typ, seen}
- }
- return // TODO refactor
-}
diff --git a/vendor/github.com/klauspost/compress/flate/fast_encoder.go b/vendor/github.com/klauspost/compress/flate/fast_encoder.go
index 6d4c1e98b..4a73e1bdd 100644
--- a/vendor/github.com/klauspost/compress/flate/fast_encoder.go
+++ b/vendor/github.com/klauspost/compress/flate/fast_encoder.go
@@ -127,7 +127,7 @@ func (e *fastGen) addBlock(src []byte) int32 {
// hash4 returns the hash of u to fit in a hash table with h bits.
// Preferably h should be a constant and should always be <32.
func hash4u(u uint32, h uint8) uint32 {
- return (u * prime4bytes) >> ((32 - h) & 31)
+ return (u * prime4bytes) >> ((32 - h) & reg8SizeMask32)
}
type tableEntryPrev struct {
@@ -138,25 +138,25 @@ type tableEntryPrev struct {
// hash4x64 returns the hash of the lowest 4 bytes of u to fit in a hash table with h bits.
// Preferably h should be a constant and should always be <32.
func hash4x64(u uint64, h uint8) uint32 {
- return (uint32(u) * prime4bytes) >> ((32 - h) & 31)
+ return (uint32(u) * prime4bytes) >> ((32 - h) & reg8SizeMask32)
}
// hash7 returns the hash of the lowest 7 bytes of u to fit in a hash table with h bits.
// Preferably h should be a constant and should always be <64.
func hash7(u uint64, h uint8) uint32 {
- return uint32(((u << (64 - 56)) * prime7bytes) >> ((64 - h) & 63))
+ return uint32(((u << (64 - 56)) * prime7bytes) >> ((64 - h) & reg8SizeMask64))
}
// hash8 returns the hash of u to fit in a hash table with h bits.
// Preferably h should be a constant and should always be <64.
func hash8(u uint64, h uint8) uint32 {
- return uint32((u * prime8bytes) >> ((64 - h) & 63))
+ return uint32((u * prime8bytes) >> ((64 - h) & reg8SizeMask64))
}
// hash6 returns the hash of the lowest 6 bytes of u to fit in a hash table with h bits.
// Preferably h should be a constant and should always be <64.
func hash6(u uint64, h uint8) uint32 {
- return uint32(((u << (64 - 48)) * prime6bytes) >> ((64 - h) & 63))
+ return uint32(((u << (64 - 48)) * prime6bytes) >> ((64 - h) & reg8SizeMask64))
}
// matchlen will return the match length between offsets and t in src.
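
The reg8SizeMask* constants make the shift-count masking explicit: Go defines a shift by a count >= the operand width as producing 0, so without a mask the compiler has to guard variable counts, while a masked count compiles to a bare shift instruction. The pattern in isolation (constant name illustrative):

    package main

    import "fmt"

    const regSizeMask64 = 63 // counts for 64-bit shifts are taken mod 64

    // shiftMasked proves to the compiler that the count is < 64, avoiding
    // the out-of-range guard Go otherwise emits for variable shift counts.
    func shiftMasked(x uint64, c uint8) uint64 {
    	return x << (c & regSizeMask64)
    }

    func main() {
    	fmt.Println(shiftMasked(1, 8))  // 256
    	fmt.Println(shiftMasked(1, 72)) // 72 & 63 = 8, so also 256
    }
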
diff --git a/vendor/github.com/klauspost/compress/flate/gen_inflate.go b/vendor/github.com/klauspost/compress/flate/gen_inflate.go
index c74a95fe7..b26d19ec2 100644
--- a/vendor/github.com/klauspost/compress/flate/gen_inflate.go
+++ b/vendor/github.com/klauspost/compress/flate/gen_inflate.go
@@ -85,7 +85,7 @@ readLiteral:
return
}
f.roffset++
- b |= uint32(c) << (nb & 31)
+ b |= uint32(c) << (nb & regSizeMaskUint32)
nb += 8
}
chunk := f.hl.chunks[b&(huffmanNumChunks-1)]
@@ -104,7 +104,7 @@ readLiteral:
f.err = CorruptInputError(f.roffset)
return
}
- f.b = b >> (n & 31)
+ f.b = b >> (n & regSizeMaskUint32)
f.nb = nb - n
v = int(chunk >> huffmanValueShift)
break
@@ -167,15 +167,15 @@ readLiteral:
return
}
}
- length += int(f.b & uint32(1<<n-1))
- f.b >>= n
+ length += int(f.b & uint32(1<<(n&regSizeMaskUint32)-1))
+ f.b >>= n & regSizeMaskUint32
f.nb -= n
}
- var dist int
+ var dist uint32
if f.hd == nil {
for f.nb < 5 {
- if err = moreBits(); err != nil {
+ if err = f.moreBits(); err != nil {
if debugDecode {
fmt.Println("morebits f.nb<5:", err)
}
@@ -183,17 +183,19 @@ readLiteral:
return
}
}
- dist = int(bits.Reverse8(uint8(f.b & 0x1F << 3)))
+ dist = uint32(bits.Reverse8(uint8(f.b & 0x1F << 3)))
f.b >>= 5
f.nb -= 5
} else {
- if dist, err = f.huffSym(f.hd); err != nil {
+ sym, err := f.huffSym(f.hd)
+ if err != nil {
if debugDecode {
fmt.Println("huffsym:", err)
}
f.err = err
return
}
+ dist = uint32(sym)
}
switch {
@@ -202,9 +204,9 @@ readLiteral:
case dist < maxNumDist:
nb := uint(dist-2) >> 1
// have 1 bit in bottom of dist, need nb more.
- extra := (dist & 1) << nb
+ extra := (dist & 1) << (nb & regSizeMaskUint32)
for f.nb < nb {
- if err = moreBits(); err != nil {
+ if err = f.moreBits(); err != nil {
if debugDecode {
fmt.Println("morebits f.nb<nb:", err)
}
@@ -212,10 +214,10 @@ readLiteral:
return
}
}
- extra |= int(f.b & uint32(1<<nb-1))
- f.b >>= nb
+ extra |= f.b & uint32(1<<(nb&regSizeMaskUint32)-1)
+ f.b >>= nb & regSizeMaskUint32
f.nb -= nb
- dist = 1<<(nb+1) + 1 + extra
+ dist = 1<<((nb+1)&regSizeMaskUint32) + 1 + extra
default:
if debugDecode {
fmt.Println("dist too big:", dist, maxNumDist)
@@ -225,7 +227,7 @@ readLiteral:
}
// No check on length; encoding can be prescient.
- if dist > f.dict.histSize() {
+ if dist > uint32(f.dict.histSize()) {
if debugDecode {
fmt.Println("dist > f.dict.histSize():", dist, f.dict.histSize())
}
@@ -233,7 +235,7 @@ readLiteral:
return
}
- f.copyLen, f.copyDist = length, dist
+ f.copyLen, f.copyDist = length, int(dist)
goto copyHistory
}
diff --git a/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go b/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go
index 53fe1d06e..208d66711 100644
--- a/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go
+++ b/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go
@@ -206,7 +206,7 @@ func (w *huffmanBitWriter) write(b []byte) {
}
func (w *huffmanBitWriter) writeBits(b int32, nb uint16) {
- w.bits |= uint64(b) << (w.nbits & 63)
+ w.bits |= uint64(b) << (w.nbits & reg16SizeMask64)
w.nbits += nb
if w.nbits >= 48 {
w.writeOutBits()
@@ -759,7 +759,7 @@ func (w *huffmanBitWriter) writeTokens(tokens []token, leCodes, oeCodes []hcode)
} else {
// inlined
c := lengths[lengthCode&31]
- w.bits |= uint64(c.code) << (w.nbits & 63)
+ w.bits |= uint64(c.code) << (w.nbits & reg16SizeMask64)
w.nbits += c.len
if w.nbits >= 48 {
w.writeOutBits()
@@ -779,7 +779,7 @@ func (w *huffmanBitWriter) writeTokens(tokens []token, leCodes, oeCodes []hcode)
} else {
// inlined
c := offs[offsetCode&31]
- w.bits |= uint64(c.code) << (w.nbits & 63)
+ w.bits |= uint64(c.code) << (w.nbits & reg16SizeMask64)
w.nbits += c.len
if w.nbits >= 48 {
w.writeOutBits()
@@ -878,7 +878,7 @@ func (w *huffmanBitWriter) writeBlockHuff(eof bool, input []byte, sync bool) {
for _, t := range input {
// Bitwriting inlined, ~30% speedup
c := encoding[t]
- w.bits |= uint64(c.code) << ((w.nbits) & 63)
+ w.bits |= uint64(c.code) << ((w.nbits) & reg16SizeMask64)
w.nbits += c.len
if w.nbits >= 48 {
bits := w.bits
diff --git a/vendor/github.com/klauspost/compress/flate/inflate.go b/vendor/github.com/klauspost/compress/flate/inflate.go
index 3e4259f15..189e9fe0b 100644
--- a/vendor/github.com/klauspost/compress/flate/inflate.go
+++ b/vendor/github.com/klauspost/compress/flate/inflate.go
@@ -522,8 +522,8 @@ func (f *decompressor) readHuffman() error {
return err
}
}
- rep += int(f.b & uint32(1<<nb-1))
- f.b >>= nb
+ rep += int(f.b & uint32(1<<(nb&regSizeMaskUint32)-1))
+ f.b >>= nb & regSizeMaskUint32
f.nb -= nb
if i+rep > n {
if debugDecode {
@@ -603,7 +603,7 @@ readLiteral:
return
}
f.roffset++
- b |= uint32(c) << (nb & 31)
+ b |= uint32(c) << (nb & regSizeMaskUint32)
nb += 8
}
chunk := f.hl.chunks[b&(huffmanNumChunks-1)]
@@ -622,7 +622,7 @@ readLiteral:
f.err = CorruptInputError(f.roffset)
return
}
- f.b = b >> (n & 31)
+ f.b = b >> (n & regSizeMaskUint32)
f.nb = nb - n
v = int(chunk >> huffmanValueShift)
break
@@ -685,12 +685,12 @@ readLiteral:
return
}
}
- length += int(f.b & uint32(1<<n-1))
- f.b >>= n
+ length += int(f.b & uint32(1<<(n&regSizeMaskUint32)-1))
+ f.b >>= n & regSizeMaskUint32
f.nb -= n
}
- var dist int
+ var dist uint32
if f.hd == nil {
for f.nb < 5 {
if err = f.moreBits(); err != nil {
@@ -701,17 +701,19 @@ readLiteral:
return
}
}
- dist = int(bits.Reverse8(uint8(f.b & 0x1F << 3)))
+ dist = uint32(bits.Reverse8(uint8(f.b & 0x1F << 3)))
f.b >>= 5
f.nb -= 5
} else {
- if dist, err = f.huffSym(f.hd); err != nil {
+ sym, err := f.huffSym(f.hd)
+ if err != nil {
if debugDecode {
fmt.Println("huffsym:", err)
}
f.err = err
return
}
+ dist = uint32(sym)
}
switch {
@@ -720,7 +722,7 @@ readLiteral:
case dist < maxNumDist:
nb := uint(dist-2) >> 1
// have 1 bit in bottom of dist, need nb more.
- extra := (dist & 1) << nb
+ extra := (dist & 1) << (nb & regSizeMaskUint32)
for f.nb < nb {
if err = f.moreBits(); err != nil {
if debugDecode {
@@ -730,10 +732,10 @@ readLiteral:
return
}
}
- extra |= int(f.b & uint32(1<<nb-1))
- f.b >>= nb
+ extra |= f.b & uint32(1<<(nb&regSizeMaskUint32)-1)
+ f.b >>= nb & regSizeMaskUint32
f.nb -= nb
- dist = 1<<(nb+1) + 1 + extra
+ dist = 1<<((nb+1)&regSizeMaskUint32) + 1 + extra
default:
if debugDecode {
fmt.Println("dist too big:", dist, maxNumDist)
@@ -743,7 +745,7 @@ readLiteral:
}
// No check on length; encoding can be prescient.
- if dist > f.dict.histSize() {
+ if dist > uint32(f.dict.histSize()) {
if debugDecode {
fmt.Println("dist > f.dict.histSize():", dist, f.dict.histSize())
}
@@ -751,7 +753,7 @@ readLiteral:
return
}
- f.copyLen, f.copyDist = length, dist
+ f.copyLen, f.copyDist = length, int(dist)
goto copyHistory
}
@@ -869,7 +871,7 @@ func (f *decompressor) moreBits() error {
return noEOF(err)
}
f.roffset++
- f.b |= uint32(c) << f.nb
+ f.b |= uint32(c) << (f.nb & regSizeMaskUint32)
f.nb += 8
return nil
}
@@ -894,7 +896,7 @@ func (f *decompressor) huffSym(h *huffmanDecoder) (int, error) {
return 0, noEOF(err)
}
f.roffset++
- b |= uint32(c) << (nb & 31)
+ b |= uint32(c) << (nb & regSizeMaskUint32)
nb += 8
}
chunk := h.chunks[b&(huffmanNumChunks-1)]
@@ -913,7 +915,7 @@ func (f *decompressor) huffSym(h *huffmanDecoder) (int, error) {
f.err = CorruptInputError(f.roffset)
return 0, f.err
}
- f.b = b >> (n & 31)
+ f.b = b >> (n & regSizeMaskUint32)
f.nb = nb - n
return int(chunk >> huffmanValueShift), nil
}
diff --git a/vendor/github.com/klauspost/compress/flate/inflate_gen.go b/vendor/github.com/klauspost/compress/flate/inflate_gen.go
index 397dc1b1a..9a92a1b30 100644
--- a/vendor/github.com/klauspost/compress/flate/inflate_gen.go
+++ b/vendor/github.com/klauspost/compress/flate/inflate_gen.go
@@ -63,7 +63,7 @@ readLiteral:
return
}
f.roffset++
- b |= uint32(c) << (nb & 31)
+ b |= uint32(c) << (nb & regSizeMaskUint32)
nb += 8
}
chunk := f.hl.chunks[b&(huffmanNumChunks-1)]
@@ -82,7 +82,7 @@ readLiteral:
f.err = CorruptInputError(f.roffset)
return
}
- f.b = b >> (n & 31)
+ f.b = b >> (n & regSizeMaskUint32)
f.nb = nb - n
v = int(chunk >> huffmanValueShift)
break
@@ -145,15 +145,15 @@ readLiteral:
return
}
}
- length += int(f.b & uint32(1<<n-1))
- f.b >>= n
+ length += int(f.b & uint32(1<<(n&regSizeMaskUint32)-1))
+ f.b >>= n & regSizeMaskUint32
f.nb -= n
}
- var dist int
+ var dist uint32
if f.hd == nil {
for f.nb < 5 {
- if err = moreBits(); err != nil {
+ if err = f.moreBits(); err != nil {
if debugDecode {
fmt.Println("morebits f.nb<5:", err)
}
@@ -161,17 +161,19 @@ readLiteral:
return
}
}
- dist = int(bits.Reverse8(uint8(f.b & 0x1F << 3)))
+ dist = uint32(bits.Reverse8(uint8(f.b & 0x1F << 3)))
f.b >>= 5
f.nb -= 5
} else {
- if dist, err = f.huffSym(f.hd); err != nil {
+ sym, err := f.huffSym(f.hd)
+ if err != nil {
if debugDecode {
fmt.Println("huffsym:", err)
}
f.err = err
return
}
+ dist = uint32(sym)
}
switch {
@@ -180,9 +182,9 @@ readLiteral:
case dist < maxNumDist:
nb := uint(dist-2) >> 1
// have 1 bit in bottom of dist, need nb more.
- extra := (dist & 1) << nb
+ extra := (dist & 1) << (nb & regSizeMaskUint32)
for f.nb < nb {
- if err = moreBits(); err != nil {
+ if err = f.moreBits(); err != nil {
if debugDecode {
fmt.Println("morebits f.nb<nb:", err)
}
@@ -190,10 +192,10 @@ readLiteral:
return
}
}
- extra |= int(f.b & uint32(1<<nb-1))
- f.b >>= nb
+ extra |= f.b & uint32(1<<(nb&regSizeMaskUint32)-1)
+ f.b >>= nb & regSizeMaskUint32
f.nb -= nb
- dist = 1<<(nb+1) + 1 + extra
+ dist = 1<<((nb+1)&regSizeMaskUint32) + 1 + extra
default:
if debugDecode {
fmt.Println("dist too big:", dist, maxNumDist)
@@ -203,7 +205,7 @@ readLiteral:
}
// No check on length; encoding can be prescient.
- if dist > f.dict.histSize() {
+ if dist > uint32(f.dict.histSize()) {
if debugDecode {
fmt.Println("dist > f.dict.histSize():", dist, f.dict.histSize())
}
@@ -211,7 +213,7 @@ readLiteral:
return
}
- f.copyLen, f.copyDist = length, dist
+ f.copyLen, f.copyDist = length, int(dist)
goto copyHistory
}
@@ -287,7 +289,7 @@ readLiteral:
return
}
f.roffset++
- b |= uint32(c) << (nb & 31)
+ b |= uint32(c) << (nb & regSizeMaskUint32)
nb += 8
}
chunk := f.hl.chunks[b&(huffmanNumChunks-1)]
@@ -306,7 +308,7 @@ readLiteral:
f.err = CorruptInputError(f.roffset)
return
}
- f.b = b >> (n & 31)
+ f.b = b >> (n & regSizeMaskUint32)
f.nb = nb - n
v = int(chunk >> huffmanValueShift)
break
@@ -369,15 +371,15 @@ readLiteral:
return
}
}
- length += int(f.b & uint32(1<<n-1))
- f.b >>= n
+ length += int(f.b & uint32(1<<(n&regSizeMaskUint32)-1))
+ f.b >>= n & regSizeMaskUint32
f.nb -= n
}
- var dist int
+ var dist uint32
if f.hd == nil {
for f.nb < 5 {
- if err = moreBits(); err != nil {
+ if err = f.moreBits(); err != nil {
if debugDecode {
fmt.Println("morebits f.nb<5:", err)
}
@@ -385,17 +387,19 @@ readLiteral:
return
}
}
- dist = int(bits.Reverse8(uint8(f.b & 0x1F << 3)))
+ dist = uint32(bits.Reverse8(uint8(f.b & 0x1F << 3)))
f.b >>= 5
f.nb -= 5
} else {
- if dist, err = f.huffSym(f.hd); err != nil {
+ sym, err := f.huffSym(f.hd)
+ if err != nil {
if debugDecode {
fmt.Println("huffsym:", err)
}
f.err = err
return
}
+ dist = uint32(sym)
}
switch {
@@ -404,9 +408,9 @@ readLiteral:
case dist < maxNumDist:
nb := uint(dist-2) >> 1
// have 1 bit in bottom of dist, need nb more.
- extra := (dist & 1) << nb
+ extra := (dist & 1) << (nb & regSizeMaskUint32)
for f.nb < nb {
- if err = moreBits(); err != nil {
+ if err = f.moreBits(); err != nil {
if debugDecode {
fmt.Println("morebits f.nb<nb:", err)
}
@@ -414,10 +418,10 @@ readLiteral:
return
}
}
- extra |= int(f.b & uint32(1<<nb-1))
- f.b >>= nb
+ extra |= f.b & uint32(1<<(nb&regSizeMaskUint32)-1)
+ f.b >>= nb & regSizeMaskUint32
f.nb -= nb
- dist = 1<<(nb+1) + 1 + extra
+ dist = 1<<((nb+1)&regSizeMaskUint32) + 1 + extra
default:
if debugDecode {
fmt.Println("dist too big:", dist, maxNumDist)
@@ -427,7 +431,7 @@ readLiteral:
}
// No check on length; encoding can be prescient.
- if dist > f.dict.histSize() {
+ if dist > uint32(f.dict.histSize()) {
if debugDecode {
fmt.Println("dist > f.dict.histSize():", dist, f.dict.histSize())
}
@@ -435,7 +439,7 @@ readLiteral:
return
}
- f.copyLen, f.copyDist = length, dist
+ f.copyLen, f.copyDist = length, int(dist)
goto copyHistory
}
@@ -511,7 +515,7 @@ readLiteral:
return
}
f.roffset++
- b |= uint32(c) << (nb & 31)
+ b |= uint32(c) << (nb & regSizeMaskUint32)
nb += 8
}
chunk := f.hl.chunks[b&(huffmanNumChunks-1)]
@@ -530,7 +534,7 @@ readLiteral:
f.err = CorruptInputError(f.roffset)
return
}
- f.b = b >> (n & 31)
+ f.b = b >> (n & regSizeMaskUint32)
f.nb = nb - n
v = int(chunk >> huffmanValueShift)
break
@@ -593,15 +597,15 @@ readLiteral:
return
}
}
- length += int(f.b & uint32(1<<n-1))
- f.b >>= n
+ length += int(f.b & uint32(1<<(n&regSizeMaskUint32)-1))
+ f.b >>= n & regSizeMaskUint32
f.nb -= n
}
- var dist int
+ var dist uint32
if f.hd == nil {
for f.nb < 5 {
- if err = moreBits(); err != nil {
+ if err = f.moreBits(); err != nil {
if debugDecode {
fmt.Println("morebits f.nb<5:", err)
}
@@ -609,17 +613,19 @@ readLiteral:
return
}
}
- dist = int(bits.Reverse8(uint8(f.b & 0x1F << 3)))
+ dist = uint32(bits.Reverse8(uint8(f.b & 0x1F << 3)))
f.b >>= 5
f.nb -= 5
} else {
- if dist, err = f.huffSym(f.hd); err != nil {
+ sym, err := f.huffSym(f.hd)
+ if err != nil {
if debugDecode {
fmt.Println("huffsym:", err)
}
f.err = err
return
}
+ dist = uint32(sym)
}
switch {
@@ -628,9 +634,9 @@ readLiteral:
case dist < maxNumDist:
nb := uint(dist-2) >> 1
// have 1 bit in bottom of dist, need nb more.
- extra := (dist & 1) << nb
+ extra := (dist & 1) << (nb & regSizeMaskUint32)
for f.nb < nb {
- if err = moreBits(); err != nil {
+ if err = f.moreBits(); err != nil {
if debugDecode {
fmt.Println("morebits f.nb<nb:", err)
}
@@ -638,10 +644,10 @@ readLiteral:
return
}
}
- extra |= int(f.b & uint32(1<<nb-1))
- f.b >>= nb
+ extra |= f.b & uint32(1<<(nb&regSizeMaskUint32)-1)
+ f.b >>= nb & regSizeMaskUint32
f.nb -= nb
- dist = 1<<(nb+1) + 1 + extra
+ dist = 1<<((nb+1)&regSizeMaskUint32) + 1 + extra
default:
if debugDecode {
fmt.Println("dist too big:", dist, maxNumDist)
@@ -651,7 +657,7 @@ readLiteral:
}
// No check on length; encoding can be prescient.
- if dist > f.dict.histSize() {
+ if dist > uint32(f.dict.histSize()) {
if debugDecode {
fmt.Println("dist > f.dict.histSize():", dist, f.dict.histSize())
}
@@ -659,7 +665,7 @@ readLiteral:
return
}
- f.copyLen, f.copyDist = length, dist
+ f.copyLen, f.copyDist = length, int(dist)
goto copyHistory
}
@@ -735,7 +741,7 @@ readLiteral:
return
}
f.roffset++
- b |= uint32(c) << (nb & 31)
+ b |= uint32(c) << (nb & regSizeMaskUint32)
nb += 8
}
chunk := f.hl.chunks[b&(huffmanNumChunks-1)]
@@ -754,7 +760,7 @@ readLiteral:
f.err = CorruptInputError(f.roffset)
return
}
- f.b = b >> (n & 31)
+ f.b = b >> (n & regSizeMaskUint32)
f.nb = nb - n
v = int(chunk >> huffmanValueShift)
break
@@ -817,15 +823,15 @@ readLiteral:
return
}
}
- length += int(f.b & uint32(1<<n-1))
- f.b >>= n
+ length += int(f.b & uint32(1<<(n&regSizeMaskUint32)-1))
+ f.b >>= n & regSizeMaskUint32
f.nb -= n
}
- var dist int
+ var dist uint32
if f.hd == nil {
for f.nb < 5 {
- if err = moreBits(); err != nil {
+ if err = f.moreBits(); err != nil {
if debugDecode {
fmt.Println("morebits f.nb<5:", err)
}
@@ -833,17 +839,19 @@ readLiteral:
return
}
}
- dist = int(bits.Reverse8(uint8(f.b & 0x1F << 3)))
+ dist = uint32(bits.Reverse8(uint8(f.b & 0x1F << 3)))
f.b >>= 5
f.nb -= 5
} else {
- if dist, err = f.huffSym(f.hd); err != nil {
+ sym, err := f.huffSym(f.hd)
+ if err != nil {
if debugDecode {
fmt.Println("huffsym:", err)
}
f.err = err
return
}
+ dist = uint32(sym)
}
switch {
@@ -852,9 +860,9 @@ readLiteral:
case dist < maxNumDist:
nb := uint(dist-2) >> 1
// have 1 bit in bottom of dist, need nb more.
- extra := (dist & 1) << nb
+ extra := (dist & 1) << (nb & regSizeMaskUint32)
for f.nb < nb {
- if err = moreBits(); err != nil {
+ if err = f.moreBits(); err != nil {
if debugDecode {
fmt.Println("morebits f.nb<nb:", err)
}
@@ -862,10 +870,10 @@ readLiteral:
return
}
}
- extra |= int(f.b & uint32(1<<nb-1))
- f.b >>= nb
+ extra |= f.b & uint32(1<<(nb&regSizeMaskUint32)-1)
+ f.b >>= nb & regSizeMaskUint32
f.nb -= nb
- dist = 1<<(nb+1) + 1 + extra
+ dist = 1<<((nb+1)&regSizeMaskUint32) + 1 + extra
default:
if debugDecode {
fmt.Println("dist too big:", dist, maxNumDist)
@@ -875,7 +883,7 @@ readLiteral:
}
// No check on length; encoding can be prescient.
- if dist > f.dict.histSize() {
+ if dist > uint32(f.dict.histSize()) {
if debugDecode {
fmt.Println("dist > f.dict.histSize():", dist, f.dict.histSize())
}
@@ -883,7 +891,7 @@ readLiteral:
return
}
- f.copyLen, f.copyDist = length, dist
+ f.copyLen, f.copyDist = length, int(dist)
goto copyHistory
}
diff --git a/vendor/github.com/klauspost/compress/flate/regmask_amd64.go b/vendor/github.com/klauspost/compress/flate/regmask_amd64.go
new file mode 100644
index 000000000..6ed28061b
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/regmask_amd64.go
@@ -0,0 +1,37 @@
+package flate
+
+const (
+ // Masks for shifts with register sizes of the shift value.
+ // This can be used to work around the x86 design of shifting by mod register size.
+ // It can be used when a variable shift is always smaller than the register size.
+
+ // reg8SizeMaskX - shift value is 8 bits, shifted is X
+ reg8SizeMask8 = 7
+ reg8SizeMask16 = 15
+ reg8SizeMask32 = 31
+ reg8SizeMask64 = 63
+
+ // reg16SizeMaskX - shift value is 16 bits, shifted is X
+ reg16SizeMask8 = reg8SizeMask8
+ reg16SizeMask16 = reg8SizeMask16
+ reg16SizeMask32 = reg8SizeMask32
+ reg16SizeMask64 = reg8SizeMask64
+
+ // reg32SizeMaskX - shift value is 32 bits, shifted is X
+ reg32SizeMask8 = reg8SizeMask8
+ reg32SizeMask16 = reg8SizeMask16
+ reg32SizeMask32 = reg8SizeMask32
+ reg32SizeMask64 = reg8SizeMask64
+
+ // reg64SizeMaskX - shift value is 64 bits, shifted is X
+ reg64SizeMask8 = reg8SizeMask8
+ reg64SizeMask16 = reg8SizeMask16
+ reg64SizeMask32 = reg8SizeMask32
+ reg64SizeMask64 = reg8SizeMask64
+
+ // regSizeMaskUintX - shift value is uint, shifted is X
+ regSizeMaskUint8 = reg8SizeMask8
+ regSizeMaskUint16 = reg8SizeMask16
+ regSizeMaskUint32 = reg8SizeMask32
+ regSizeMaskUint64 = reg8SizeMask64
+)
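
The masking pattern these constants enable is worth spelling out: Go normally guards variable shifts with a branch, but masking the shift amount proves it is below the register width, and on amd64 the hardware already shifts modulo the register size, so the guard disappears for free. A minimal illustrative sketch, with regSizeMaskUint32 hardcoded to its amd64 value rather than selected per GOARCH as in the real package:

```Go
package main

import "fmt"

const regSizeMaskUint32 = 31 // amd64 value; regmask_other.go below uses ^uint(0)

func take(b uint32, n uint) uint32 {
	// The mask tells the compiler n&31 < 32, so no shift-overflow
	// branch is emitted; amd64 SHR masks the count the same way.
	return b >> (n & regSizeMaskUint32)
}

func main() {
	fmt.Println(take(0xF0, 4)) // 15
}
```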
diff --git a/vendor/github.com/klauspost/compress/flate/regmask_other.go b/vendor/github.com/klauspost/compress/flate/regmask_other.go
new file mode 100644
index 000000000..f477a5d6e
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/regmask_other.go
@@ -0,0 +1,39 @@
+//+build !amd64
+
+package flate
+
+const (
+ // Masks for shifts with register sizes of the shift value.
+ // This can be used to work around the x86 design of shifting by mod register size.
+ // It can be used when a variable shift is always smaller than the register size.
+
+ // reg8SizeMaskX - shift value is 8 bits, shifted is X
+ reg8SizeMask8 = 0xff
+ reg8SizeMask16 = 0xff
+ reg8SizeMask32 = 0xff
+ reg8SizeMask64 = 0xff
+
+ // reg16SizeMaskX - shift value is 16 bits, shifted is X
+ reg16SizeMask8 = 0xffff
+ reg16SizeMask16 = 0xffff
+ reg16SizeMask32 = 0xffff
+ reg16SizeMask64 = 0xffff
+
+ // reg32SizeMaskX - shift value is 32 bits, shifted is X
+ reg32SizeMask8 = 0xffffffff
+ reg32SizeMask16 = 0xffffffff
+ reg32SizeMask32 = 0xffffffff
+ reg32SizeMask64 = 0xffffffff
+
+ // reg64SizeMaskX - shift value is 64 bits, shifted is X
+ reg64SizeMask8 = 0xffffffffffffffff
+ reg64SizeMask16 = 0xffffffffffffffff
+ reg64SizeMask32 = 0xffffffffffffffff
+ reg64SizeMask64 = 0xffffffffffffffff
+
+ // regSizeMaskUintX - shift value is uint, shifted is X
+ regSizeMaskUint8 = ^uint(0)
+ regSizeMaskUint16 = ^uint(0)
+ regSizeMaskUint32 = ^uint(0)
+ regSizeMaskUint64 = ^uint(0)
+)
diff --git a/vendor/github.com/klauspost/compress/huff0/huff0.go b/vendor/github.com/klauspost/compress/huff0/huff0.go
index 5dd66854b..7ec2022b6 100644
--- a/vendor/github.com/klauspost/compress/huff0/huff0.go
+++ b/vendor/github.com/klauspost/compress/huff0/huff0.go
@@ -119,6 +119,16 @@ type Scratch struct {
huffWeight [maxSymbolValue + 1]byte
}
+// TransferCTable will transfer the previously used compression table.
+func (s *Scratch) TransferCTable(src *Scratch) {
+ if cap(s.prevTable) < len(src.prevTable) {
+ s.prevTable = make(cTable, 0, maxSymbolValue+1)
+ }
+ s.prevTable = s.prevTable[:len(src.prevTable)]
+ copy(s.prevTable, src.prevTable)
+ s.prevTableLog = src.prevTableLog
+}
+
func (s *Scratch) prepare(in []byte) (*Scratch, error) {
if len(in) > BlockSizeMax {
return nil, ErrTooBig
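
TransferCTable is how the zstd encoder changes later in this patch seed a block encoder's huff0 state from a dictionary. A hedged sketch of the call pattern, mirroring the blockEnc.encodeLits change below; it assumes the source Scratch already compressed representative data:

```Go
package main

import "github.com/klauspost/compress/huff0"

// seedFromDict copies dict's previously used table into dst;
// ReusePolicyAllow then lets huff0 reuse it only when it helps.
func seedFromDict(dst, dict *huff0.Scratch) {
	dst.TransferCTable(dict)
	dst.Reuse = huff0.ReusePolicyAllow
}

func main() {}
```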
diff --git a/vendor/github.com/klauspost/compress/zstd/README.md b/vendor/github.com/klauspost/compress/zstd/README.md
index ac3640dc9..ea3e51082 100644
--- a/vendor/github.com/klauspost/compress/zstd/README.md
+++ b/vendor/github.com/klauspost/compress/zstd/README.md
@@ -5,7 +5,6 @@ It offers a very wide range of compression / speed trade-off, while being backed
A high performance compression algorithm is implemented. For now focused on speed.
This package provides [compression](#Compressor) to and [decompression](#Decompressor) of Zstandard content.
-Note that custom dictionaries are only supported for decompression.
This package is pure Go and without use of "unsafe".
@@ -232,41 +231,6 @@ nyc-taxi-data-10M.csv gzstd 1 3325605752 928656485 23876 132.83
nyc-taxi-data-10M.csv gzkp 1 3325605752 924718719 16388 193.53
```
-### Converters
-
-As part of the development process a *Snappy* -> *Zstandard* converter was also built.
-
-This can convert a *framed* [Snappy Stream](https://godoc.org/github.com/golang/snappy#Writer) to a zstd stream.
-Note that a single block is not framed.
-
-Conversion is done by converting the stream directly from Snappy without intermediate full decoding.
-Therefore the compression ratio is much less than what can be done by a full decompression
-and compression, and a faulty Snappy stream may lead to a faulty Zstandard stream without
-any errors being generated.
-No CRC value is being generated and not all CRC values of the Snappy stream are checked.
-However, it provides really fast re-compression of Snappy streams.
-
-
-```
-BenchmarkSnappy_ConvertSilesia-8 1 1156001600 ns/op 183.35 MB/s
-Snappy len 103008711 -> zstd len 82687318
-
-BenchmarkSnappy_Enwik9-8 1 6472998400 ns/op 154.49 MB/s
-Snappy len 508028601 -> zstd len 390921079
-```
-
-
-```Go
- s := zstd.SnappyConverter{}
- n, err = s.Convert(input, output)
- if err != nil {
- fmt.Println("Re-compressed stream to", n, "bytes")
- }
-```
-
-The converter `s` can be reused to avoid allocations, even after errors.
-
-
## Decompressor
Status: STABLE - there may still be subtle bugs, but a wide variety of content has been tested.
@@ -337,6 +301,21 @@ A re-used Decoder will still contain the dictionaries registered.
When registering multiple dictionaries with the same ID, the last one will be used.
+It is possible to use dictionaries when compressing data.
+
+To enable a dictionary, use `WithEncoderDict(dict []byte)`. Only one dictionary can be registered,
+and it will likely be used even if it doesn't improve compression.
+
+The same dictionary must then be used to decompress the content.
+
+For any real gains, the dictionary should be built with similar data.
+If an unsuitable dictionary is used, the output may be slightly larger than with no dictionary.
+Use the [zstd commandline tool](https://github.com/facebook/zstd/releases) to build a dictionary from sample data.
+For information see [zstd dictionary information](https://github.com/facebook/zstd#the-case-for-small-data-compression).
+
+For now there is a fixed startup performance penalty for compressing content with dictionaries.
+This will likely be improved over time. Just be sure to test performance when implementing.
+
### Allocation-less operation
The decoder has been designed to operate without allocations after a warmup.
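
A short usage sketch of the advice above; the dictionary file path and payload are placeholders:

```Go
package main

import (
	"bytes"
	"io/ioutil"

	"github.com/klauspost/compress/zstd"
)

func main() {
	// samples.dict is hypothetical; build one with `zstd --train`.
	dict, err := ioutil.ReadFile("samples.dict")
	if err != nil {
		panic(err)
	}
	var buf bytes.Buffer
	enc, err := zstd.NewWriter(&buf, zstd.WithEncoderDict(dict))
	if err != nil {
		panic(err)
	}
	if _, err := enc.Write([]byte("payload resembling the training samples")); err != nil {
		panic(err)
	}
	if err := enc.Close(); err != nil {
		panic(err)
	}
}
```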
diff --git a/vendor/github.com/klauspost/compress/zstd/blockdec.go b/vendor/github.com/klauspost/compress/zstd/blockdec.go
index c8ec6e331..4733ea876 100644
--- a/vendor/github.com/klauspost/compress/zstd/blockdec.go
+++ b/vendor/github.com/klauspost/compress/zstd/blockdec.go
@@ -646,7 +646,7 @@ func (b *blockDec) decodeCompressed(hist *history) error {
}
} else {
if hist.huffTree != nil && huff != nil {
- if hist.dict == nil || hist.dict.litDec != hist.huffTree {
+ if hist.dict == nil || hist.dict.litEnc != hist.huffTree {
huffDecoderPool.Put(hist.huffTree)
}
hist.huffTree = nil
diff --git a/vendor/github.com/klauspost/compress/zstd/blockenc.go b/vendor/github.com/klauspost/compress/zstd/blockenc.go
index be718afd4..083fbb502 100644
--- a/vendor/github.com/klauspost/compress/zstd/blockenc.go
+++ b/vendor/github.com/klauspost/compress/zstd/blockenc.go
@@ -14,12 +14,13 @@ import (
)
type blockEnc struct {
- size int
- literals []byte
- sequences []seq
- coders seqCoders
- litEnc *huff0.Scratch
- wr bitWriter
+ size int
+ literals []byte
+ sequences []seq
+ coders seqCoders
+ litEnc *huff0.Scratch
+ dictLitEnc *huff0.Scratch
+ wr bitWriter
extraLits int
last bool
@@ -314,19 +315,19 @@ func (b *blockEnc) encodeRawTo(dst, src []byte) []byte {
}
// encodeLits can be used if the block is only litLen.
-func (b *blockEnc) encodeLits(raw bool) error {
+func (b *blockEnc) encodeLits(lits []byte, raw bool) error {
var bh blockHeader
bh.setLast(b.last)
- bh.setSize(uint32(len(b.literals)))
+ bh.setSize(uint32(len(lits)))
// Don't compress extremely small blocks
- if len(b.literals) < 32 || raw {
+ if len(lits) < 8 || (len(lits) < 32 && b.dictLitEnc == nil) || raw {
if debug {
- println("Adding RAW block, length", len(b.literals), "last:", b.last)
+ println("Adding RAW block, length", len(lits), "last:", b.last)
}
bh.setType(blockTypeRaw)
b.output = bh.appendTo(b.output)
- b.output = append(b.output, b.literals...)
+ b.output = append(b.output, lits...)
return nil
}
@@ -335,13 +336,18 @@ func (b *blockEnc) encodeLits(raw bool) error {
reUsed, single bool
err error
)
- if len(b.literals) >= 1024 {
+ if b.dictLitEnc != nil {
+ b.litEnc.TransferCTable(b.dictLitEnc)
+ b.litEnc.Reuse = huff0.ReusePolicyAllow
+ b.dictLitEnc = nil
+ }
+ if len(lits) >= 1024 {
// Use 4 Streams.
- out, reUsed, err = huff0.Compress4X(b.literals, b.litEnc)
- } else if len(b.literals) > 32 {
+ out, reUsed, err = huff0.Compress4X(lits, b.litEnc)
+ } else if len(lits) > 32 {
// Use 1 stream
single = true
- out, reUsed, err = huff0.Compress1X(b.literals, b.litEnc)
+ out, reUsed, err = huff0.Compress1X(lits, b.litEnc)
} else {
err = huff0.ErrIncompressible
}
@@ -349,19 +355,19 @@ func (b *blockEnc) encodeLits(raw bool) error {
switch err {
case huff0.ErrIncompressible:
if debug {
- println("Adding RAW block, length", len(b.literals), "last:", b.last)
+ println("Adding RAW block, length", len(lits), "last:", b.last)
}
bh.setType(blockTypeRaw)
b.output = bh.appendTo(b.output)
- b.output = append(b.output, b.literals...)
+ b.output = append(b.output, lits...)
return nil
case huff0.ErrUseRLE:
if debug {
- println("Adding RLE block, length", len(b.literals))
+ println("Adding RLE block, length", len(lits))
}
bh.setType(blockTypeRLE)
b.output = bh.appendTo(b.output)
- b.output = append(b.output, b.literals[0])
+ b.output = append(b.output, lits[0])
return nil
default:
return err
@@ -384,7 +390,7 @@ func (b *blockEnc) encodeLits(raw bool) error {
lh.setType(literalsBlockCompressed)
}
// Set sizes
- lh.setSizes(len(out), len(b.literals), single)
+ lh.setSizes(len(out), len(lits), single)
bh.setSize(uint32(len(out) + lh.size() + 1))
// Write block headers.
@@ -444,13 +450,19 @@ func fuzzFseEncoder(data []byte) int {
}
// encode will encode the block and append the output in b.output.
-func (b *blockEnc) encode(raw, rawAllLits bool) error {
+// Previous offset codes must be pushed if more blocks are expected.
+func (b *blockEnc) encode(org []byte, raw, rawAllLits bool) error {
if len(b.sequences) == 0 {
- return b.encodeLits(rawAllLits)
+ return b.encodeLits(b.literals, rawAllLits)
}
- // We want some difference
- if len(b.literals) > (b.size - (b.size >> 5)) {
- return errIncompressible
+ // We want some difference to at least account for the headers.
+ saved := b.size - len(b.literals) - (b.size >> 5)
+ if saved < 16 {
+ if org == nil {
+ return errIncompressible
+ }
+ b.popOffsets()
+ return b.encodeLits(org, rawAllLits)
}
var bh blockHeader
@@ -466,6 +478,11 @@ func (b *blockEnc) encode(raw, rawAllLits bool) error {
reUsed, single bool
err error
)
+ if b.dictLitEnc != nil {
+ b.litEnc.TransferCTable(b.dictLitEnc)
+ b.litEnc.Reuse = huff0.ReusePolicyAllow
+ b.dictLitEnc = nil
+ }
if len(b.literals) >= 1024 && !raw {
// Use 4 Streams.
out, reUsed, err = huff0.Compress4X(b.literals, b.litEnc)
diff --git a/vendor/github.com/klauspost/compress/zstd/decoder.go b/vendor/github.com/klauspost/compress/zstd/decoder.go
index 66b51bf2d..d78be6d42 100644
--- a/vendor/github.com/klauspost/compress/zstd/decoder.go
+++ b/vendor/github.com/klauspost/compress/zstd/decoder.go
@@ -187,7 +187,7 @@ func (d *Decoder) Reset(r io.Reader) error {
d.current.err = err
d.current.flushed = true
if debug {
- println("sync decode to ", len(dst), "bytes, err:", err)
+ println("sync decode to", len(dst), "bytes, err:", err)
}
return nil
}
@@ -303,6 +303,9 @@ func (d *Decoder) DecodeAll(input, dst []byte) ([]byte, error) {
frame.history.reset()
err := frame.reset(&frame.bBuf)
if err == io.EOF {
+ if debug {
+ println("frame reset return EOF")
+ }
return dst, nil
}
if frame.DictionaryID != nil {
@@ -341,6 +344,9 @@ func (d *Decoder) DecodeAll(input, dst []byte) ([]byte, error) {
return dst, err
}
if len(frame.bBuf) == 0 {
+ if debug {
+ println("frame dbuf empty")
+ }
break
}
}
diff --git a/vendor/github.com/klauspost/compress/zstd/dict.go b/vendor/github.com/klauspost/compress/zstd/dict.go
index 8eb6f6ba3..fa25a18d8 100644
--- a/vendor/github.com/klauspost/compress/zstd/dict.go
+++ b/vendor/github.com/klauspost/compress/zstd/dict.go
@@ -13,14 +13,31 @@ import (
type dict struct {
id uint32
- litDec *huff0.Scratch
+ litEnc *huff0.Scratch
llDec, ofDec, mlDec sequenceDec
- offsets [3]int
- content []byte
+ //llEnc, ofEnc, mlEnc []*fseEncoder
+ offsets [3]int
+ content []byte
}
var dictMagic = [4]byte{0x37, 0xa4, 0x30, 0xec}
+// ID returns the dictionary id or 0 if d is nil.
+func (d *dict) ID() uint32 {
+ if d == nil {
+ return 0
+ }
+ return d.id
+}
+
+// DictContentSize returns the dictionary content size or 0 if d is nil.
+func (d *dict) DictContentSize() int {
+ if d == nil {
+ return 0
+ }
+ return len(d.content)
+}
+
// Load a dictionary as described in
// https://github.com/facebook/zstd/blob/master/doc/zstd_compression_format.md#dictionary-format
func loadDict(b []byte) (*dict, error) {
@@ -43,10 +60,11 @@ func loadDict(b []byte) (*dict, error) {
// Read literal table
var err error
- d.litDec, b, err = huff0.ReadTable(b[8:], nil)
+ d.litEnc, b, err = huff0.ReadTable(b[8:], nil)
if err != nil {
return nil, err
}
+ d.litEnc.Reuse = huff0.ReusePolicyMust
br := byteReader{
b: b,
diff --git a/vendor/github.com/klauspost/compress/zstd/enc_base.go b/vendor/github.com/klauspost/compress/zstd/enc_base.go
new file mode 100644
index 000000000..b1b7c6e6a
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/enc_base.go
@@ -0,0 +1,155 @@
+package zstd
+
+import (
+ "fmt"
+ "math/bits"
+
+ "github.com/klauspost/compress/zstd/internal/xxhash"
+)
+
+type fastBase struct {
+ // cur is the offset at the start of hist
+ cur int32
+ // maximum offset. Should be at least 2x block size.
+ maxMatchOff int32
+ hist []byte
+ crc *xxhash.Digest
+ tmp [8]byte
+ blk *blockEnc
+ lastDictID uint32
+}
+
+// CRC returns the underlying CRC writer.
+func (e *fastBase) CRC() *xxhash.Digest {
+ return e.crc
+}
+
+// AppendCRC will append the CRC to the destination slice and return it.
+func (e *fastBase) AppendCRC(dst []byte) []byte {
+ crc := e.crc.Sum(e.tmp[:0])
+ dst = append(dst, crc[7], crc[6], crc[5], crc[4])
+ return dst
+}
+
+// WindowSize returns the window size of the encoder,
+// or a window size small enough to contain the input size, if > 0.
+func (e *fastBase) WindowSize(size int) int32 {
+ if size > 0 && size < int(e.maxMatchOff) {
+ b := int32(1) << uint(bits.Len(uint(size)))
+ // Keep minimum window.
+ if b < 1024 {
+ b = 1024
+ }
+ return b
+ }
+ return e.maxMatchOff
+}
+
+// Block returns the current block.
+func (e *fastBase) Block() *blockEnc {
+ return e.blk
+}
+
+func (e *fastBase) addBlock(src []byte) int32 {
+ if debugAsserts && e.cur > bufferReset {
+ panic(fmt.Sprintf("e.cur (%d) > buffer reset (%d)", e.cur, bufferReset))
+ }
+ // check if we have space already
+ if len(e.hist)+len(src) > cap(e.hist) {
+ if cap(e.hist) == 0 {
+ l := e.maxMatchOff * 2
+ // Make it at least 1MB.
+ if l < 1<<20 {
+ l = 1 << 20
+ }
+ e.hist = make([]byte, 0, l)
+ } else {
+ if cap(e.hist) < int(e.maxMatchOff*2) {
+ panic("unexpected buffer size")
+ }
+ // Move down
+ offset := int32(len(e.hist)) - e.maxMatchOff
+ copy(e.hist[0:e.maxMatchOff], e.hist[offset:])
+ e.cur += offset
+ e.hist = e.hist[:e.maxMatchOff]
+ }
+ }
+ s := int32(len(e.hist))
+ e.hist = append(e.hist, src...)
+ return s
+}
+
+// UseBlock will replace the block with the provided one,
+// but transfer recent offsets from the previous.
+func (e *fastBase) UseBlock(enc *blockEnc) {
+ enc.reset(e.blk)
+ e.blk = enc
+}
+
+func (e *fastBase) matchlenNoHist(s, t int32, src []byte) int32 {
+ // Extend the match to be as long as possible.
+ return int32(matchLen(src[s:], src[t:]))
+}
+
+func (e *fastBase) matchlen(s, t int32, src []byte) int32 {
+ if debugAsserts {
+ if s < 0 {
+ err := fmt.Sprintf("s (%d) < 0", s)
+ panic(err)
+ }
+ if t < 0 {
+ err := fmt.Sprintf("t (%d) < 0", t)
+ panic(err)
+ }
+ if s-t > e.maxMatchOff {
+ err := fmt.Sprintf("s (%d) - t (%d) > maxMatchOff (%d)", s, t, e.maxMatchOff)
+ panic(err)
+ }
+ if len(src)-int(s) > maxCompressedBlockSize {
+ panic(fmt.Sprintf("len(src)-s (%d) > maxCompressedBlockSize (%d)", len(src)-int(s), maxCompressedBlockSize))
+ }
+ }
+
+ // Extend the match to be as long as possible.
+ return int32(matchLen(src[s:], src[t:]))
+}
+
+// Reset the encoding table.
+func (e *fastBase) resetBase(d *dict, singleBlock bool) {
+ if e.blk == nil {
+ e.blk = &blockEnc{}
+ e.blk.init()
+ } else {
+ e.blk.reset(nil)
+ }
+ e.blk.initNewEncode()
+ if e.crc == nil {
+ e.crc = xxhash.New()
+ } else {
+ e.crc.Reset()
+ }
+ if (!singleBlock || d.DictContentSize() > 0) && cap(e.hist) < int(e.maxMatchOff*2)+d.DictContentSize() {
+ l := e.maxMatchOff*2 + int32(d.DictContentSize())
+ // Make it at least 1MB.
+ if l < 1<<20 {
+ l = 1 << 20
+ }
+ e.hist = make([]byte, 0, l)
+ }
+ // We offset current position so everything will be out of reach.
+ // If above reset line, history will be purged.
+ if e.cur < bufferReset {
+ e.cur += e.maxMatchOff + int32(len(e.hist))
+ }
+ e.hist = e.hist[:0]
+ if d != nil {
+ // Set offsets (currently not used)
+ for i, off := range d.offsets {
+ e.blk.recentOffsets[i] = uint32(off)
+ e.blk.prevRecentOffsets[i] = e.blk.recentOffsets[i]
+ }
+ // Transfer litenc.
+ e.blk.dictLitEnc = d.litEnc
+ e.hist = append(e.hist, d.content...)
+ }
+}
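
WindowSize above sizes the window for one-shot inputs by rounding up to a power of two with a 1 KiB floor. A hedged sketch of that arithmetic, omitting the maxMatchOff cap from the real method:

```Go
package main

import (
	"fmt"
	"math/bits"
)

// windowFor mirrors fastBase.WindowSize for 0 < size < maxMatchOff:
// round up to the next power of two, but never below 1 KiB.
func windowFor(size int) int32 {
	b := int32(1) << uint(bits.Len(uint(size)))
	if b < 1024 {
		b = 1024
	}
	return b
}

func main() {
	fmt.Println(windowFor(3000)) // 4096
	fmt.Println(windowFor(100))  // 1024
}
```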
diff --git a/vendor/github.com/klauspost/compress/zstd/enc_better.go b/vendor/github.com/klauspost/compress/zstd/enc_better.go
index c120d9054..94a5343d0 100644
--- a/vendor/github.com/klauspost/compress/zstd/enc_better.go
+++ b/vendor/github.com/klauspost/compress/zstd/enc_better.go
@@ -31,8 +31,10 @@ type prevEntry struct {
// and that it is longer (lazy matching).
type betterFastEncoder struct {
fastBase
- table [betterShortTableSize]tableEntry
- longTable [betterLongTableSize]prevEntry
+ table [betterShortTableSize]tableEntry
+ longTable [betterLongTableSize]prevEntry
+ dictTable []tableEntry
+ dictLongTable []prevEntry
}
// Encode improves compression...
@@ -516,3 +518,78 @@ encodeLoop:
func (e *betterFastEncoder) EncodeNoHist(blk *blockEnc, src []byte) {
e.Encode(blk, src)
}
+
+// Reset will reset and set a dictionary if not nil
+func (e *betterFastEncoder) Reset(d *dict, singleBlock bool) {
+ e.resetBase(d, singleBlock)
+ if d == nil {
+ return
+ }
+ // Init or copy dict table
+ if len(e.dictTable) != len(e.table) || d.id != e.lastDictID {
+ if len(e.dictTable) != len(e.table) {
+ e.dictTable = make([]tableEntry, len(e.table))
+ }
+ end := int32(len(d.content)) - 8 + e.maxMatchOff
+ for i := e.maxMatchOff; i < end; i += 4 {
+ const hashLog = betterShortTableBits
+
+ cv := load6432(d.content, i-e.maxMatchOff)
+ nextHash := hash5(cv, hashLog) // 0 -> 4
+ nextHash1 := hash5(cv>>8, hashLog) // 1 -> 5
+ nextHash2 := hash5(cv>>16, hashLog) // 2 -> 6
+ nextHash3 := hash5(cv>>24, hashLog) // 3 -> 7
+ e.dictTable[nextHash] = tableEntry{
+ val: uint32(cv),
+ offset: i,
+ }
+ e.dictTable[nextHash1] = tableEntry{
+ val: uint32(cv >> 8),
+ offset: i + 1,
+ }
+ e.dictTable[nextHash2] = tableEntry{
+ val: uint32(cv >> 16),
+ offset: i + 2,
+ }
+ e.dictTable[nextHash3] = tableEntry{
+ val: uint32(cv >> 24),
+ offset: i + 3,
+ }
+ }
+ e.lastDictID = d.id
+ }
+
+ // Init or copy dict long table
+ if len(e.dictLongTable) != len(e.longTable) || d.id != e.lastDictID {
+ if len(e.dictLongTable) != len(e.longTable) {
+ e.dictLongTable = make([]prevEntry, len(e.longTable))
+ }
+ if len(d.content) >= 8 {
+ cv := load6432(d.content, 0)
+ h := hash8(cv, betterLongTableBits)
+ e.dictLongTable[h] = prevEntry{
+ offset: e.maxMatchOff,
+ prev: e.dictLongTable[h].offset,
+ }
+
+ end := int32(len(d.content)) - 8 + e.maxMatchOff
+ off := 8 // First to read
+ for i := e.maxMatchOff + 1; i < end; i++ {
+ cv = cv>>8 | (uint64(d.content[off]) << 56)
+ h := hash8(cv, betterLongTableBits)
+ e.dictLongTable[h] = prevEntry{
+ offset: i,
+ prev: e.dictLongTable[h].offset,
+ }
+ off++
+ }
+ }
+ e.lastDictID = d.id
+ }
+ // Reset table to initial state
+ copy(e.longTable[:], e.dictLongTable)
+
+ e.cur = e.maxMatchOff
+ // Reset table to initial state
+ copy(e.table[:], e.dictTable)
+}
diff --git a/vendor/github.com/klauspost/compress/zstd/enc_dfast.go b/vendor/github.com/klauspost/compress/zstd/enc_dfast.go
index 50276bcde..19eebf66e 100644
--- a/vendor/github.com/klauspost/compress/zstd/enc_dfast.go
+++ b/vendor/github.com/klauspost/compress/zstd/enc_dfast.go
@@ -18,7 +18,8 @@ const (
type doubleFastEncoder struct {
fastEncoder
- longTable [dFastLongTableSize]tableEntry
+ longTable [dFastLongTableSize]tableEntry
+ dictLongTable []tableEntry
}
// Encode mimics functionality in zstd_dfast.c
@@ -494,7 +495,7 @@ encodeLoop:
// but the likelihood of both the first 4 bytes and the hash matching should be enough.
t = candidateL.offset - e.cur
if debugAsserts && s <= t {
- panic(fmt.Sprintf("s (%d) <= t (%d)", s, t))
+ panic(fmt.Sprintf("s (%d) <= t (%d). cur: %d", s, t, e.cur))
}
if debugAsserts && s-t > e.maxMatchOff {
panic("s - t >e.maxMatchOff")
@@ -676,3 +677,37 @@ encodeLoop:
e.cur += int32(len(src))
}
}
+
+// Reset will reset and set a dictionary if not nil
+func (e *doubleFastEncoder) Reset(d *dict, singleBlock bool) {
+ e.fastEncoder.Reset(d, singleBlock)
+ if d == nil {
+ return
+ }
+
+ // Init or copy dict long table
+ if len(e.dictLongTable) != len(e.longTable) || d.id != e.lastDictID {
+ if len(e.dictLongTable) != len(e.longTable) {
+ e.dictLongTable = make([]tableEntry, len(e.longTable))
+ }
+ if len(d.content) >= 8 {
+ cv := load6432(d.content, 0)
+ e.dictLongTable[hash8(cv, dFastLongTableBits)] = tableEntry{
+ val: uint32(cv),
+ offset: e.maxMatchOff,
+ }
+ end := int32(len(d.content)) - 8 + e.maxMatchOff
+ for i := e.maxMatchOff + 1; i < end; i++ {
+ cv = cv>>8 | (uint64(d.content[i-e.maxMatchOff+7]) << 56)
+ e.dictLongTable[hash8(cv, dFastLongTableBits)] = tableEntry{
+ val: uint32(cv),
+ offset: i,
+ }
+ }
+ }
+ e.lastDictID = d.id
+ }
+ // Reset table to initial state
+ e.cur = e.maxMatchOff
+ copy(e.longTable[:], e.dictLongTable)
+}
diff --git a/vendor/github.com/klauspost/compress/zstd/enc_fast.go b/vendor/github.com/klauspost/compress/zstd/enc_fast.go
index 4104b456c..0b301df43 100644
--- a/vendor/github.com/klauspost/compress/zstd/enc_fast.go
+++ b/vendor/github.com/klauspost/compress/zstd/enc_fast.go
@@ -8,8 +8,6 @@ import (
"fmt"
"math"
"math/bits"
-
- "github.com/klauspost/compress/zstd/internal/xxhash"
)
const (
@@ -24,51 +22,10 @@ type tableEntry struct {
offset int32
}
-type fastBase struct {
- // cur is the offset at the start of hist
- cur int32
- // maximum offset. Should be at least 2x block size.
- maxMatchOff int32
- hist []byte
- crc *xxhash.Digest
- tmp [8]byte
- blk *blockEnc
-}
-
type fastEncoder struct {
fastBase
- table [tableSize]tableEntry
-}
-
-// CRC returns the underlying CRC writer.
-func (e *fastBase) CRC() *xxhash.Digest {
- return e.crc
-}
-
-// AppendCRC will append the CRC to the destination slice and return it.
-func (e *fastBase) AppendCRC(dst []byte) []byte {
- crc := e.crc.Sum(e.tmp[:0])
- dst = append(dst, crc[7], crc[6], crc[5], crc[4])
- return dst
-}
-
-// WindowSize returns the window size of the encoder,
-// or a window size small enough to contain the input size, if > 0.
-func (e *fastBase) WindowSize(size int) int32 {
- if size > 0 && size < int(e.maxMatchOff) {
- b := int32(1) << uint(bits.Len(uint(size)))
- // Keep minimum window.
- if b < 1024 {
- b = 1024
- }
- return b
- }
- return e.maxMatchOff
-}
-
-// Block returns the current block.
-func (e *fastBase) Block() *blockEnc {
- return e.blk
+ table [tableSize]tableEntry
+ dictTable []tableEntry
}
// Encode mimics functionality in zstd_fast.c
@@ -660,96 +617,45 @@ encodeLoop:
}
}
-func (e *fastBase) addBlock(src []byte) int32 {
- if debugAsserts && e.cur > bufferReset {
- panic(fmt.Sprintf("ecur (%d) > buffer reset (%d)", e.cur, bufferReset))
- }
- // check if we have space already
- if len(e.hist)+len(src) > cap(e.hist) {
- if cap(e.hist) == 0 {
- l := e.maxMatchOff * 2
- // Make it at least 1MB.
- if l < 1<<20 {
- l = 1 << 20
- }
- e.hist = make([]byte, 0, l)
- } else {
- if cap(e.hist) < int(e.maxMatchOff*2) {
- panic("unexpected buffer size")
- }
- // Move down
- offset := int32(len(e.hist)) - e.maxMatchOff
- copy(e.hist[0:e.maxMatchOff], e.hist[offset:])
- e.cur += offset
- e.hist = e.hist[:e.maxMatchOff]
- }
+// Reset will reset and set a dictionary if not nil
+func (e *fastEncoder) Reset(d *dict, singleBlock bool) {
+ e.resetBase(d, singleBlock)
+ if d == nil {
+ return
}
- s := int32(len(e.hist))
- e.hist = append(e.hist, src...)
- return s
-}
-// useBlock will replace the block with the provided one,
-// but transfer recent offsets from the previous.
-func (e *fastBase) UseBlock(enc *blockEnc) {
- enc.reset(e.blk)
- e.blk = enc
-}
-
-func (e *fastBase) matchlenNoHist(s, t int32, src []byte) int32 {
- // Extend the match to be as long as possible.
- return int32(matchLen(src[s:], src[t:]))
-}
-
-func (e *fastBase) matchlen(s, t int32, src []byte) int32 {
- if debugAsserts {
- if s < 0 {
- err := fmt.Sprintf("s (%d) < 0", s)
- panic(err)
- }
- if t < 0 {
- err := fmt.Sprintf("s (%d) < 0", s)
- panic(err)
- }
- if s-t > e.maxMatchOff {
- err := fmt.Sprintf("s (%d) - t (%d) > maxMatchOff (%d)", s, t, e.maxMatchOff)
- panic(err)
- }
- if len(src)-int(s) > maxCompressedBlockSize {
- panic(fmt.Sprintf("len(src)-s (%d) > maxCompressedBlockSize (%d)", len(src)-int(s), maxCompressedBlockSize))
+ // Init or copy dict table
+ if len(e.dictTable) != len(e.table) || d.id != e.lastDictID {
+ if len(e.dictTable) != len(e.table) {
+ e.dictTable = make([]tableEntry, len(e.table))
+ }
+ if true {
+ end := e.maxMatchOff + int32(len(d.content)) - 8
+ for i := e.maxMatchOff; i < end; i += 3 {
+ const hashLog = tableBits
+
+ cv := load6432(d.content, i-e.maxMatchOff)
+ nextHash := hash6(cv, hashLog) // 0 -> 5
+ nextHash1 := hash6(cv>>8, hashLog) // 1 -> 6
+ nextHash2 := hash6(cv>>16, hashLog) // 2 -> 7
+ e.dictTable[nextHash] = tableEntry{
+ val: uint32(cv),
+ offset: i,
+ }
+ e.dictTable[nextHash1] = tableEntry{
+ val: uint32(cv >> 8),
+ offset: i + 1,
+ }
+ e.dictTable[nextHash2] = tableEntry{
+ val: uint32(cv >> 16),
+ offset: i + 2,
+ }
+ }
}
+ e.lastDictID = d.id
}
- // Extend the match to be as long as possible.
- return int32(matchLen(src[s:], src[t:]))
-}
-
-// Reset the encoding table.
-func (e *fastBase) Reset(singleBlock bool) {
- if e.blk == nil {
- e.blk = &blockEnc{}
- e.blk.init()
- } else {
- e.blk.reset(nil)
- }
- e.blk.initNewEncode()
- if e.crc == nil {
- e.crc = xxhash.New()
- } else {
- e.crc.Reset()
- }
- if !singleBlock && cap(e.hist) < int(e.maxMatchOff*2) {
- l := e.maxMatchOff * 2
- // Make it at least 1MB.
- if l < 1<<20 {
- l = 1 << 20
- }
- e.hist = make([]byte, 0, l)
- }
- // We offset current position so everything will be out of reach.
- // If above reset line, history will be purged.
- if e.cur < bufferReset {
- e.cur += e.maxMatchOff + int32(len(e.hist))
- }
- e.hist = e.hist[:0]
+ e.cur = e.maxMatchOff
+ // Reset table to initial state
+ copy(e.table[:], e.dictTable)
}
diff --git a/vendor/github.com/klauspost/compress/zstd/enc_params.go b/vendor/github.com/klauspost/compress/zstd/enc_params.go
deleted file mode 100644
index d874116f7..000000000
--- a/vendor/github.com/klauspost/compress/zstd/enc_params.go
+++ /dev/null
@@ -1,157 +0,0 @@
-// Copyright 2019+ Klaus Post. All rights reserved.
-// License information can be found in the LICENSE file.
-// Based on work by Yann Collet, released under BSD License.
-
-package zstd
-
-/*
-// encParams are not really used, just here for reference.
-type encParams struct {
- // largest match distance : larger == more compression, more memory needed during decompression
- windowLog uint8
-
- // fully searched segment : larger == more compression, slower, more memory (useless for fast)
- chainLog uint8
-
- // dispatch table : larger == faster, more memory
- hashLog uint8
-
- // < nb of searches : larger == more compression, slower
- searchLog uint8
-
- // < match length searched : larger == faster decompression, sometimes less compression
- minMatch uint8
-
- // acceptable match size for optimal parser (only) : larger == more compression, slower
- targetLength uint32
-
- // see ZSTD_strategy definition above
- strategy strategy
-}
-
-// strategy defines the algorithm to use when generating sequences.
-type strategy uint8
-
-const (
- // Compression strategies, listed from fastest to strongest
- strategyFast strategy = iota + 1
- strategyDfast
- strategyGreedy
- strategyLazy
- strategyLazy2
- strategyBtlazy2
- strategyBtopt
- strategyBtultra
- strategyBtultra2
- // note : new strategies _might_ be added in the future.
- // Only the order (from fast to strong) is guaranteed
-
-)
-
-var defEncParams = [4][]encParams{
- { // "default" - for any srcSize > 256 KB
- // W, C, H, S, L, TL, strat
- {19, 12, 13, 1, 6, 1, strategyFast}, // base for negative levels
- {19, 13, 14, 1, 7, 0, strategyFast}, // level 1
- {20, 15, 16, 1, 6, 0, strategyFast}, // level 2
- {21, 16, 17, 1, 5, 1, strategyDfast}, // level 3
- {21, 18, 18, 1, 5, 1, strategyDfast}, // level 4
- {21, 18, 19, 2, 5, 2, strategyGreedy}, // level 5
- {21, 19, 19, 3, 5, 4, strategyGreedy}, // level 6
- {21, 19, 19, 3, 5, 8, strategyLazy}, // level 7
- {21, 19, 19, 3, 5, 16, strategyLazy2}, // level 8
- {21, 19, 20, 4, 5, 16, strategyLazy2}, // level 9
- {22, 20, 21, 4, 5, 16, strategyLazy2}, // level 10
- {22, 21, 22, 4, 5, 16, strategyLazy2}, // level 11
- {22, 21, 22, 5, 5, 16, strategyLazy2}, // level 12
- {22, 21, 22, 5, 5, 32, strategyBtlazy2}, // level 13
- {22, 22, 23, 5, 5, 32, strategyBtlazy2}, // level 14
- {22, 23, 23, 6, 5, 32, strategyBtlazy2}, // level 15
- {22, 22, 22, 5, 5, 48, strategyBtopt}, // level 16
- {23, 23, 22, 5, 4, 64, strategyBtopt}, // level 17
- {23, 23, 22, 6, 3, 64, strategyBtultra}, // level 18
- {23, 24, 22, 7, 3, 256, strategyBtultra2}, // level 19
- {25, 25, 23, 7, 3, 256, strategyBtultra2}, // level 20
- {26, 26, 24, 7, 3, 512, strategyBtultra2}, // level 21
- {27, 27, 25, 9, 3, 999, strategyBtultra2}, // level 22
- },
- { // for srcSize <= 256 KB
- // W, C, H, S, L, T, strat
- {18, 12, 13, 1, 5, 1, strategyFast}, // base for negative levels
- {18, 13, 14, 1, 6, 0, strategyFast}, // level 1
- {18, 14, 14, 1, 5, 1, strategyDfast}, // level 2
- {18, 16, 16, 1, 4, 1, strategyDfast}, // level 3
- {18, 16, 17, 2, 5, 2, strategyGreedy}, // level 4.
- {18, 18, 18, 3, 5, 2, strategyGreedy}, // level 5.
- {18, 18, 19, 3, 5, 4, strategyLazy}, // level 6.
- {18, 18, 19, 4, 4, 4, strategyLazy}, // level 7
- {18, 18, 19, 4, 4, 8, strategyLazy2}, // level 8
- {18, 18, 19, 5, 4, 8, strategyLazy2}, // level 9
- {18, 18, 19, 6, 4, 8, strategyLazy2}, // level 10
- {18, 18, 19, 5, 4, 12, strategyBtlazy2}, // level 11.
- {18, 19, 19, 7, 4, 12, strategyBtlazy2}, // level 12.
- {18, 18, 19, 4, 4, 16, strategyBtopt}, // level 13
- {18, 18, 19, 4, 3, 32, strategyBtopt}, // level 14.
- {18, 18, 19, 6, 3, 128, strategyBtopt}, // level 15.
- {18, 19, 19, 6, 3, 128, strategyBtultra}, // level 16.
- {18, 19, 19, 8, 3, 256, strategyBtultra}, // level 17.
- {18, 19, 19, 6, 3, 128, strategyBtultra2}, // level 18.
- {18, 19, 19, 8, 3, 256, strategyBtultra2}, // level 19.
- {18, 19, 19, 10, 3, 512, strategyBtultra2}, // level 20.
- {18, 19, 19, 12, 3, 512, strategyBtultra2}, // level 21.
- {18, 19, 19, 13, 3, 999, strategyBtultra2}, // level 22.
- },
- { // for srcSize <= 128 KB
- // W, C, H, S, L, T, strat
- {17, 12, 12, 1, 5, 1, strategyFast}, // base for negative levels
- {17, 12, 13, 1, 6, 0, strategyFast}, // level 1
- {17, 13, 15, 1, 5, 0, strategyFast}, // level 2
- {17, 15, 16, 2, 5, 1, strategyDfast}, // level 3
- {17, 17, 17, 2, 4, 1, strategyDfast}, // level 4
- {17, 16, 17, 3, 4, 2, strategyGreedy}, // level 5
- {17, 17, 17, 3, 4, 4, strategyLazy}, // level 6
- {17, 17, 17, 3, 4, 8, strategyLazy2}, // level 7
- {17, 17, 17, 4, 4, 8, strategyLazy2}, // level 8
- {17, 17, 17, 5, 4, 8, strategyLazy2}, // level 9
- {17, 17, 17, 6, 4, 8, strategyLazy2}, // level 10
- {17, 17, 17, 5, 4, 8, strategyBtlazy2}, // level 11
- {17, 18, 17, 7, 4, 12, strategyBtlazy2}, // level 12
- {17, 18, 17, 3, 4, 12, strategyBtopt}, // level 13.
- {17, 18, 17, 4, 3, 32, strategyBtopt}, // level 14.
- {17, 18, 17, 6, 3, 256, strategyBtopt}, // level 15.
- {17, 18, 17, 6, 3, 128, strategyBtultra}, // level 16.
- {17, 18, 17, 8, 3, 256, strategyBtultra}, // level 17.
- {17, 18, 17, 10, 3, 512, strategyBtultra}, // level 18.
- {17, 18, 17, 5, 3, 256, strategyBtultra2}, // level 19.
- {17, 18, 17, 7, 3, 512, strategyBtultra2}, // level 20.
- {17, 18, 17, 9, 3, 512, strategyBtultra2}, // level 21.
- {17, 18, 17, 11, 3, 999, strategyBtultra2}, // level 22.
- },
- { // for srcSize <= 16 KB
- // W, C, H, S, L, T, strat
- {14, 12, 13, 1, 5, 1, strategyFast}, // base for negative levels
- {14, 14, 15, 1, 5, 0, strategyFast}, // level 1
- {14, 14, 15, 1, 4, 0, strategyFast}, // level 2
- {14, 14, 15, 2, 4, 1, strategyDfast}, // level 3
- {14, 14, 14, 4, 4, 2, strategyGreedy}, // level 4
- {14, 14, 14, 3, 4, 4, strategyLazy}, // level 5.
- {14, 14, 14, 4, 4, 8, strategyLazy2}, // level 6
- {14, 14, 14, 6, 4, 8, strategyLazy2}, // level 7
- {14, 14, 14, 8, 4, 8, strategyLazy2}, // level 8.
- {14, 15, 14, 5, 4, 8, strategyBtlazy2}, // level 9.
- {14, 15, 14, 9, 4, 8, strategyBtlazy2}, // level 10.
- {14, 15, 14, 3, 4, 12, strategyBtopt}, // level 11.
- {14, 15, 14, 4, 3, 24, strategyBtopt}, // level 12.
- {14, 15, 14, 5, 3, 32, strategyBtultra}, // level 13.
- {14, 15, 15, 6, 3, 64, strategyBtultra}, // level 14.
- {14, 15, 15, 7, 3, 256, strategyBtultra}, // level 15.
- {14, 15, 15, 5, 3, 48, strategyBtultra2}, // level 16.
- {14, 15, 15, 6, 3, 128, strategyBtultra2}, // level 17.
- {14, 15, 15, 7, 3, 256, strategyBtultra2}, // level 18.
- {14, 15, 15, 8, 3, 256, strategyBtultra2}, // level 19.
- {14, 15, 15, 8, 3, 512, strategyBtultra2}, // level 20.
- {14, 15, 15, 9, 3, 512, strategyBtultra2}, // level 21.
- {14, 15, 15, 10, 3, 999, strategyBtultra2}, // level 22.
- },
-}
-*/
diff --git a/vendor/github.com/klauspost/compress/zstd/encoder.go b/vendor/github.com/klauspost/compress/zstd/encoder.go
index 95ebc3d84..f5759211d 100644
--- a/vendor/github.com/klauspost/compress/zstd/encoder.go
+++ b/vendor/github.com/klauspost/compress/zstd/encoder.go
@@ -35,7 +35,7 @@ type encoder interface {
AppendCRC([]byte) []byte
WindowSize(size int) int32
UseBlock(*blockEnc)
- Reset(singleBlock bool)
+ Reset(d *dict, singleBlock bool)
}
type encoderState struct {
@@ -83,8 +83,6 @@ func (e *Encoder) initialize() {
e.encoders = make(chan encoder, e.o.concurrent)
for i := 0; i < e.o.concurrent; i++ {
enc := e.o.encoder()
- // If not single block, history will be allocated on first use.
- enc.Reset(true)
e.encoders <- enc
}
}
@@ -115,7 +113,7 @@ func (e *Encoder) Reset(w io.Writer) {
s.filling = s.filling[:0]
s.current = s.current[:0]
s.previous = s.previous[:0]
- s.encoder.Reset(false)
+ s.encoder.Reset(e.o.dict, false)
s.headerWritten = false
s.eofWritten = false
s.fullFrameWritten = false
@@ -200,8 +198,9 @@ func (e *Encoder) nextBlock(final bool) error {
WindowSize: uint32(s.encoder.WindowSize(0)),
SingleSegment: false,
Checksum: e.o.crc,
- DictID: 0,
+ DictID: e.o.dict.ID(),
}
+
dst, err := fh.appendTo(tmp[:0])
if err != nil {
return err
@@ -281,7 +280,7 @@ func (e *Encoder) nextBlock(final bool) error {
// If we got the exact same number of literals as input,
// assume the literals cannot be compressed.
if len(src) != len(blk.literals) || len(src) != e.o.blockSize {
- err = blk.encode(e.o.noEntropy, !e.o.allLitEntropy)
+ err = blk.encode(src, e.o.noEntropy, !e.o.allLitEntropy)
}
switch err {
case errIncompressible:
@@ -311,7 +310,13 @@ func (e *Encoder) ReadFrom(r io.Reader) (n int64, err error) {
if debug {
println("Using ReadFrom")
}
- // Maybe handle stuff queued?
+
+ // Flush any current writes.
+ if len(e.state.filling) > 0 {
+ if err := e.nextBlock(false); err != nil {
+ return 0, err
+ }
+ }
e.state.filling = e.state.filling[:e.o.blockSize]
src := e.state.filling
for {
@@ -328,7 +333,7 @@ func (e *Encoder) ReadFrom(r io.Reader) (n int64, err error) {
if debug {
println("ReadFrom: got EOF final block:", len(e.state.filling))
}
- return n, e.nextBlock(true)
+ return n, nil
default:
if debug {
println("ReadFrom: got error:", err)
@@ -450,7 +455,6 @@ func (e *Encoder) EncodeAll(src, dst []byte) []byte {
defer func() {
// Release encoder reference to last block.
// If a non-single block is needed the encoder will reset again.
- enc.Reset(true)
e.encoders <- enc
}()
// Use single segments when above minimum window and below 1MB.
@@ -463,7 +467,7 @@ func (e *Encoder) EncodeAll(src, dst []byte) []byte {
WindowSize: uint32(enc.WindowSize(len(src))),
SingleSegment: single,
Checksum: e.o.crc,
- DictID: 0,
+ DictID: e.o.dict.ID(),
}
// If less than 1MB, allocate a buffer up front.
@@ -477,13 +481,18 @@ func (e *Encoder) EncodeAll(src, dst []byte) []byte {
// If we can do everything in one block, prefer that.
if len(src) <= maxCompressedBlockSize {
+ enc.Reset(e.o.dict, true)
// Slightly faster with no history and everything in one block.
if e.o.crc {
_, _ = enc.CRC().Write(src)
}
blk := enc.Block()
blk.last = true
- enc.EncodeNoHist(blk, src)
+ if e.o.dict == nil {
+ enc.EncodeNoHist(blk, src)
+ } else {
+ enc.Encode(blk, src)
+ }
// If we got the exact same number of literals as input,
// assume the literals cannot be compressed.
@@ -492,7 +501,7 @@ func (e *Encoder) EncodeAll(src, dst []byte) []byte {
if len(blk.literals) != len(src) || len(src) != e.o.blockSize {
// Output directly to dst
blk.output = dst
- err = blk.encode(e.o.noEntropy, !e.o.allLitEntropy)
+ err = blk.encode(src, e.o.noEntropy, !e.o.allLitEntropy)
}
switch err {
@@ -508,7 +517,7 @@ func (e *Encoder) EncodeAll(src, dst []byte) []byte {
}
blk.output = oldout
} else {
- enc.Reset(false)
+ enc.Reset(e.o.dict, false)
blk := enc.Block()
for len(src) > 0 {
todo := src
@@ -519,7 +528,6 @@ func (e *Encoder) EncodeAll(src, dst []byte) []byte {
if e.o.crc {
_, _ = enc.CRC().Write(todo)
}
- blk.reset(nil)
blk.pushOffsets()
enc.Encode(blk, todo)
if len(src) == 0 {
@@ -529,7 +537,7 @@ func (e *Encoder) EncodeAll(src, dst []byte) []byte {
// If we got the exact same number of literals as input,
// assume the literals cannot be compressed.
if len(blk.literals) != len(todo) || len(todo) != e.o.blockSize {
- err = blk.encode(e.o.noEntropy, !e.o.allLitEntropy)
+ err = blk.encode(todo, e.o.noEntropy, !e.o.allLitEntropy)
}
switch err {
@@ -544,6 +552,7 @@ func (e *Encoder) EncodeAll(src, dst []byte) []byte {
default:
panic(err)
}
+ blk.reset(nil)
}
}
if e.o.crc {
diff --git a/vendor/github.com/klauspost/compress/zstd/encoder_options.go b/vendor/github.com/klauspost/compress/zstd/encoder_options.go
index dfac14ddd..579206163 100644
--- a/vendor/github.com/klauspost/compress/zstd/encoder_options.go
+++ b/vendor/github.com/klauspost/compress/zstd/encoder_options.go
@@ -24,6 +24,7 @@ type encoderOptions struct {
allLitEntropy bool
customWindow bool
customALEntropy bool
+ dict *dict
}
func (o *encoderOptions) setDefault() {
@@ -265,3 +266,16 @@ func WithSingleSegment(b bool) EOption {
return nil
}
}
+
+// WithEncoderDict registers a dictionary that will be used when encoding.
+// The encoder *may* choose to use no dictionary instead for certain payloads.
+func WithEncoderDict(dict []byte) EOption {
+ return func(o *encoderOptions) error {
+ d, err := loadDict(dict)
+ if err != nil {
+ return err
+ }
+ o.dict = d
+ return nil
+ }
+}
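
A hedged round-trip sketch pairing this option with the decoder-side dictionary registration mentioned in the README change above; the exact decoder option name (WithDecoderDicts) and a valid dictionary in dict are assumptions of this sketch:

```Go
package main

import "github.com/klauspost/compress/zstd"

// roundTrip compresses src with a dictionary and decompresses it again.
// dict must hold a valid zstd dictionary (magic 0x37 0xa4 0x30 0xec).
func roundTrip(src, dict []byte) ([]byte, error) {
	enc, err := zstd.NewWriter(nil, zstd.WithEncoderDict(dict))
	if err != nil {
		return nil, err
	}
	compressed := enc.EncodeAll(src, nil)
	dec, err := zstd.NewReader(nil, zstd.WithDecoderDicts(dict))
	if err != nil {
		return nil, err
	}
	defer dec.Close()
	return dec.DecodeAll(compressed, nil)
}

func main() {}
```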
diff --git a/vendor/github.com/klauspost/compress/zstd/frameenc.go b/vendor/github.com/klauspost/compress/zstd/frameenc.go
index 4479cfe18..4ef7f5a3e 100644
--- a/vendor/github.com/klauspost/compress/zstd/frameenc.go
+++ b/vendor/github.com/klauspost/compress/zstd/frameenc.go
@@ -5,6 +5,7 @@
package zstd
import (
+ "encoding/binary"
"fmt"
"io"
"math"
@@ -16,7 +17,7 @@ type frameHeader struct {
WindowSize uint32
SingleSegment bool
Checksum bool
- DictID uint32 // Not stored.
+ DictID uint32
}
const maxHeaderSize = 14
@@ -30,6 +31,24 @@ func (f frameHeader) appendTo(dst []byte) ([]byte, error) {
if f.SingleSegment {
fhd |= 1 << 5
}
+
+ var dictIDContent []byte
+ if f.DictID > 0 {
+ var tmp [4]byte
+ if f.DictID < 256 {
+ fhd |= 1
+ tmp[0] = uint8(f.DictID)
+ dictIDContent = tmp[:1]
+ } else if f.DictID < 1<<16 {
+ fhd |= 2
+ binary.LittleEndian.PutUint16(tmp[:2], uint16(f.DictID))
+ dictIDContent = tmp[:2]
+ } else {
+ fhd |= 3
+ binary.LittleEndian.PutUint32(tmp[:4], f.DictID)
+ dictIDContent = tmp[:4]
+ }
+ }
var fcs uint8
if f.ContentSize >= 256 {
fcs++
@@ -40,6 +59,7 @@ func (f frameHeader) appendTo(dst []byte) ([]byte, error) {
if f.ContentSize >= 0xffffffff {
fcs++
}
+
fhd |= fcs << 6
dst = append(dst, fhd)
@@ -48,7 +68,9 @@ func (f frameHeader) appendTo(dst []byte) ([]byte, error) {
windowLog := (bits.Len32(f.WindowSize-1) - winLogMin) << 3
dst = append(dst, uint8(windowLog))
}
-
+ if f.DictID > 0 {
+ dst = append(dst, dictIDContent...)
+ }
switch fcs {
case 0:
if f.SingleSegment {
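
The header logic added above stores the dictionary ID in the fewest little-endian bytes that fit, signalled by the low two bits of the frame header descriptor. Restated as a standalone helper (dictIDBytes is illustrative, not part of the package):

```Go
package main

import "fmt"

// dictIDBytes restates the sizing rule: descriptor low bits 01/10/11
// select a 1-, 2-, or 4-byte little-endian DictID field; 0 means none.
func dictIDBytes(id uint32) int {
	switch {
	case id == 0:
		return 0
	case id < 256:
		return 1
	case id < 1<<16:
		return 2
	default:
		return 4
	}
}

func main() {
	fmt.Println(dictIDBytes(42), dictIDBytes(300), dictIDBytes(1<<20)) // 1 2 4
}
```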
diff --git a/vendor/github.com/klauspost/compress/zstd/history.go b/vendor/github.com/klauspost/compress/zstd/history.go
index f418f50fc..f783e32d2 100644
--- a/vendor/github.com/klauspost/compress/zstd/history.go
+++ b/vendor/github.com/klauspost/compress/zstd/history.go
@@ -37,7 +37,7 @@ func (h *history) reset() {
}
h.decoders = sequenceDecs{}
if h.huffTree != nil {
- if h.dict == nil || h.dict.litDec != h.huffTree {
+ if h.dict == nil || h.dict.litEnc != h.huffTree {
huffDecoderPool.Put(h.huffTree)
}
}
@@ -55,7 +55,7 @@ func (h *history) setDict(dict *dict) {
h.decoders.offsets = dict.ofDec
h.decoders.matchLengths = dict.mlDec
h.recentOffsets = dict.offsets
- h.huffTree = dict.litDec
+ h.huffTree = dict.litEnc
}
// append bytes to history.
diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec.go b/vendor/github.com/klauspost/compress/zstd/seqdec.go
index 7ff870400..b5c8ef133 100644
--- a/vendor/github.com/klauspost/compress/zstd/seqdec.go
+++ b/vendor/github.com/klauspost/compress/zstd/seqdec.go
@@ -196,6 +196,10 @@ func (s *sequenceDecs) decode(seqs int, br *bitReader, hist []byte) error {
s.literals = s.literals[ll:]
out := s.out
+ if mo == 0 && ml > 0 {
+ return fmt.Errorf("zero matchoff and matchlen (%d) > 0", ml)
+ }
+
if mo > len(s.out)+len(hist) || mo > s.windowSize {
if len(s.dict) == 0 {
return fmt.Errorf("match offset (%d) bigger than current history (%d)", mo, len(s.out)+len(hist))
@@ -218,10 +222,6 @@ func (s *sequenceDecs) decode(seqs int, br *bitReader, hist []byte) error {
}
}
- if mo == 0 && ml > 0 {
- return fmt.Errorf("zero matchoff and matchlen (%d) > 0", ml)
- }
-
// Copy from history.
// TODO: Blocks without history could be made to ignore this completely.
if v := mo - len(s.out); v > 0 {
diff --git a/vendor/github.com/klauspost/compress/zstd/snappy.go b/vendor/github.com/klauspost/compress/zstd/snappy.go
index 690428cd2..841fd95ac 100644
--- a/vendor/github.com/klauspost/compress/zstd/snappy.go
+++ b/vendor/github.com/klauspost/compress/zstd/snappy.go
@@ -111,7 +111,7 @@ func (r *SnappyConverter) Convert(in io.Reader, w io.Writer) (int64, error) {
// Add empty last block
r.block.reset(nil)
r.block.last = true
- err := r.block.encodeLits(false)
+ err := r.block.encodeLits(r.block.literals, false)
if err != nil {
return written, err
}
@@ -178,7 +178,7 @@ func (r *SnappyConverter) Convert(in io.Reader, w io.Writer) (int64, error) {
r.err = ErrSnappyCorrupt
return written, r.err
}
- err = r.block.encode(false, false)
+ err = r.block.encode(nil, false, false)
switch err {
case errIncompressible:
r.block.popOffsets()
@@ -188,7 +188,7 @@ func (r *SnappyConverter) Convert(in io.Reader, w io.Writer) (int64, error) {
println("snappy.Decode:", err)
return written, err
}
- err = r.block.encodeLits(false)
+ err = r.block.encodeLits(r.block.literals, false)
if err != nil {
return written, err
}
@@ -235,7 +235,7 @@ func (r *SnappyConverter) Convert(in io.Reader, w io.Writer) (int64, error) {
r.err = ErrSnappyCorrupt
return written, r.err
}
- err := r.block.encodeLits(false)
+ err := r.block.encodeLits(r.block.literals, false)
if err != nil {
return written, err
}
diff --git a/vendor/github.com/klauspost/pgzip/.travis.yml b/vendor/github.com/klauspost/pgzip/.travis.yml
index 6e9fca0ba..acfec4bb0 100644
--- a/vendor/github.com/klauspost/pgzip/.travis.yml
+++ b/vendor/github.com/klauspost/pgzip/.travis.yml
@@ -1,19 +1,22 @@
language: go
-sudo: false
-
os:
- linux
- osx
go:
- - 1.9.x
- - 1.10.x
+ - 1.13.x
+ - 1.14.x
+ - 1.15.x
- master
-script:
- - go test -v -cpu=1,2,4 .
- - go test -v -cpu=2 -race -short .
+env:
+ - GO111MODULE=off
+
+script:
+ - diff <(gofmt -d .) <(printf "")
+ - go test -v -cpu=1,2,4 .
+ - go test -v -cpu=2 -race -short .
matrix:
allow_failures:
diff --git a/vendor/github.com/klauspost/pgzip/gunzip.go b/vendor/github.com/klauspost/pgzip/gunzip.go
index 93efec714..d1ae730b2 100644
--- a/vendor/github.com/klauspost/pgzip/gunzip.go
+++ b/vendor/github.com/klauspost/pgzip/gunzip.go
@@ -331,6 +331,16 @@ func (z *Reader) killReadAhead() error {
// Wait for decompressor to be closed and return error, if any.
e, ok := <-z.closeErr
z.activeRA = false
+
+ for blk := range z.readAhead {
+ if blk.b != nil {
+ z.blockPool <- blk.b
+ }
+ }
+ if cap(z.current) > 0 {
+ z.blockPool <- z.current
+ z.current = nil
+ }
if !ok {
// Channel is closed, so if there was any error it has already been returned.
return nil
@@ -418,6 +428,7 @@ func (z *Reader) doReadAhead() {
case z.readAhead <- read{b: buf, err: err}:
case <-closeReader:
// Sent on close, we don't care about the next results
+ z.blockPool <- buf
return
}
if err != nil {
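The two hunks above return block buffers to `z.blockPool` when read-ahead is cancelled, so buffers are not lost from the pool on an early close. A rough sketch of the underlying channel-as-free-list pattern, using a made-up `blockPool` type rather than pgzip's internals:

```go
package main

import "fmt"

// A bounded channel works as a simple free list: Get prefers a recycled
// buffer, Put returns one without blocking (dropping it if the pool is full).
type blockPool chan []byte

func (p blockPool) Get(size int) []byte {
	select {
	case b := <-p:
		return b[:0] // reuse capacity, reset length
	default:
		return make([]byte, 0, size)
	}
}

func (p blockPool) Put(b []byte) {
	select {
	case p <- b:
	default: // pool full; let the GC reclaim it
	}
}

func main() {
	pool := make(blockPool, 4)
	buf := pool.Get(1 << 10)
	buf = append(buf, "hello"...)
	fmt.Println(len(buf), cap(buf)) // 5 1024
	pool.Put(buf)                   // recycled, as the pgzip fix does on early close
}
```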
diff --git a/vendor/github.com/mattn/go-isatty/.travis.yml b/vendor/github.com/mattn/go-isatty/.travis.yml
deleted file mode 100644
index 604314dd4..000000000
--- a/vendor/github.com/mattn/go-isatty/.travis.yml
+++ /dev/null
@@ -1,14 +0,0 @@
-language: go
-sudo: false
-go:
- - 1.13.x
- - tip
-
-before_install:
- - go get -t -v ./...
-
-script:
- - ./go.test.sh
-
-after_success:
- - bash <(curl -s https://codecov.io/bash)
diff --git a/vendor/github.com/mattn/go-isatty/LICENSE b/vendor/github.com/mattn/go-isatty/LICENSE
deleted file mode 100644
index 65dc692b6..000000000
--- a/vendor/github.com/mattn/go-isatty/LICENSE
+++ /dev/null
@@ -1,9 +0,0 @@
-Copyright (c) Yasuhiro MATSUMOTO <mattn.jp@gmail.com>
-
-MIT License (Expat)
-
-Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/mattn/go-isatty/README.md b/vendor/github.com/mattn/go-isatty/README.md
deleted file mode 100644
index 38418353e..000000000
--- a/vendor/github.com/mattn/go-isatty/README.md
+++ /dev/null
@@ -1,50 +0,0 @@
-# go-isatty
-
-[![Godoc Reference](https://godoc.org/github.com/mattn/go-isatty?status.svg)](http://godoc.org/github.com/mattn/go-isatty)
-[![Codecov](https://codecov.io/gh/mattn/go-isatty/branch/master/graph/badge.svg)](https://codecov.io/gh/mattn/go-isatty)
-[![Coverage Status](https://coveralls.io/repos/github/mattn/go-isatty/badge.svg?branch=master)](https://coveralls.io/github/mattn/go-isatty?branch=master)
-[![Go Report Card](https://goreportcard.com/badge/mattn/go-isatty)](https://goreportcard.com/report/mattn/go-isatty)
-
-isatty for golang
-
-## Usage
-
-```go
-package main
-
-import (
- "fmt"
- "github.com/mattn/go-isatty"
- "os"
-)
-
-func main() {
- if isatty.IsTerminal(os.Stdout.Fd()) {
- fmt.Println("Is Terminal")
- } else if isatty.IsCygwinTerminal(os.Stdout.Fd()) {
- fmt.Println("Is Cygwin/MSYS2 Terminal")
- } else {
- fmt.Println("Is Not Terminal")
- }
-}
-```
-
-## Installation
-
-```
-$ go get github.com/mattn/go-isatty
-```
-
-## License
-
-MIT
-
-## Author
-
-Yasuhiro Matsumoto (a.k.a mattn)
-
-## Thanks
-
-* k-takata: base idea for IsCygwinTerminal
-
- https://github.com/k-takata/go-iscygpty
diff --git a/vendor/github.com/mattn/go-isatty/doc.go b/vendor/github.com/mattn/go-isatty/doc.go
deleted file mode 100644
index 17d4f90eb..000000000
--- a/vendor/github.com/mattn/go-isatty/doc.go
+++ /dev/null
@@ -1,2 +0,0 @@
-// Package isatty implements interface to isatty
-package isatty
diff --git a/vendor/github.com/mattn/go-isatty/go.mod b/vendor/github.com/mattn/go-isatty/go.mod
deleted file mode 100644
index 605c4c221..000000000
--- a/vendor/github.com/mattn/go-isatty/go.mod
+++ /dev/null
@@ -1,5 +0,0 @@
-module github.com/mattn/go-isatty
-
-go 1.12
-
-require golang.org/x/sys v0.0.0-20200116001909-b77594299b42
diff --git a/vendor/github.com/mattn/go-isatty/go.sum b/vendor/github.com/mattn/go-isatty/go.sum
deleted file mode 100644
index 912e29cbc..000000000
--- a/vendor/github.com/mattn/go-isatty/go.sum
+++ /dev/null
@@ -1,2 +0,0 @@
-golang.org/x/sys v0.0.0-20200116001909-b77594299b42 h1:vEOn+mP2zCOVzKckCZy6YsCtDblrpj/w7B9nxGNELpg=
-golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
diff --git a/vendor/github.com/mattn/go-isatty/go.test.sh b/vendor/github.com/mattn/go-isatty/go.test.sh
deleted file mode 100644
index 012162b07..000000000
--- a/vendor/github.com/mattn/go-isatty/go.test.sh
+++ /dev/null
@@ -1,12 +0,0 @@
-#!/usr/bin/env bash
-
-set -e
-echo "" > coverage.txt
-
-for d in $(go list ./... | grep -v vendor); do
- go test -race -coverprofile=profile.out -covermode=atomic "$d"
- if [ -f profile.out ]; then
- cat profile.out >> coverage.txt
- rm profile.out
- fi
-done
diff --git a/vendor/github.com/mattn/go-isatty/isatty_bsd.go b/vendor/github.com/mattn/go-isatty/isatty_bsd.go
deleted file mode 100644
index 711f28808..000000000
--- a/vendor/github.com/mattn/go-isatty/isatty_bsd.go
+++ /dev/null
@@ -1,18 +0,0 @@
-// +build darwin freebsd openbsd netbsd dragonfly
-// +build !appengine
-
-package isatty
-
-import "golang.org/x/sys/unix"
-
-// IsTerminal return true if the file descriptor is terminal.
-func IsTerminal(fd uintptr) bool {
- _, err := unix.IoctlGetTermios(int(fd), unix.TIOCGETA)
- return err == nil
-}
-
-// IsCygwinTerminal return true if the file descriptor is a cygwin or msys2
-// terminal. This is also always false on this environment.
-func IsCygwinTerminal(fd uintptr) bool {
- return false
-}
diff --git a/vendor/github.com/mattn/go-isatty/isatty_others.go b/vendor/github.com/mattn/go-isatty/isatty_others.go
deleted file mode 100644
index ff714a376..000000000
--- a/vendor/github.com/mattn/go-isatty/isatty_others.go
+++ /dev/null
@@ -1,15 +0,0 @@
-// +build appengine js nacl
-
-package isatty
-
-// IsTerminal returns true if the file descriptor is terminal which
-// is always false on js and appengine classic which is a sandboxed PaaS.
-func IsTerminal(fd uintptr) bool {
- return false
-}
-
-// IsCygwinTerminal() return true if the file descriptor is a cygwin or msys2
-// terminal. This is also always false on this environment.
-func IsCygwinTerminal(fd uintptr) bool {
- return false
-}
diff --git a/vendor/github.com/mattn/go-isatty/isatty_plan9.go b/vendor/github.com/mattn/go-isatty/isatty_plan9.go
deleted file mode 100644
index c5b6e0c08..000000000
--- a/vendor/github.com/mattn/go-isatty/isatty_plan9.go
+++ /dev/null
@@ -1,22 +0,0 @@
-// +build plan9
-
-package isatty
-
-import (
- "syscall"
-)
-
-// IsTerminal returns true if the given file descriptor is a terminal.
-func IsTerminal(fd uintptr) bool {
- path, err := syscall.Fd2path(int(fd))
- if err != nil {
- return false
- }
- return path == "/dev/cons" || path == "/mnt/term/dev/cons"
-}
-
-// IsCygwinTerminal return true if the file descriptor is a cygwin or msys2
-// terminal. This is also always false on this environment.
-func IsCygwinTerminal(fd uintptr) bool {
- return false
-}
diff --git a/vendor/github.com/mattn/go-isatty/isatty_solaris.go b/vendor/github.com/mattn/go-isatty/isatty_solaris.go
deleted file mode 100644
index bdd5c79a0..000000000
--- a/vendor/github.com/mattn/go-isatty/isatty_solaris.go
+++ /dev/null
@@ -1,22 +0,0 @@
-// +build solaris
-// +build !appengine
-
-package isatty
-
-import (
- "golang.org/x/sys/unix"
-)
-
-// IsTerminal returns true if the given file descriptor is a terminal.
-// see: http://src.illumos.org/source/xref/illumos-gate/usr/src/lib/libbc/libc/gen/common/isatty.c
-func IsTerminal(fd uintptr) bool {
- var termio unix.Termio
- err := unix.IoctlSetTermio(int(fd), unix.TCGETA, &termio)
- return err == nil
-}
-
-// IsCygwinTerminal return true if the file descriptor is a cygwin or msys2
-// terminal. This is also always false on this environment.
-func IsCygwinTerminal(fd uintptr) bool {
- return false
-}
diff --git a/vendor/github.com/mattn/go-isatty/isatty_tcgets.go b/vendor/github.com/mattn/go-isatty/isatty_tcgets.go
deleted file mode 100644
index 31a1ca973..000000000
--- a/vendor/github.com/mattn/go-isatty/isatty_tcgets.go
+++ /dev/null
@@ -1,18 +0,0 @@
-// +build linux aix
-// +build !appengine
-
-package isatty
-
-import "golang.org/x/sys/unix"
-
-// IsTerminal return true if the file descriptor is terminal.
-func IsTerminal(fd uintptr) bool {
- _, err := unix.IoctlGetTermios(int(fd), unix.TCGETS)
- return err == nil
-}
-
-// IsCygwinTerminal return true if the file descriptor is a cygwin or msys2
-// terminal. This is also always false on this environment.
-func IsCygwinTerminal(fd uintptr) bool {
- return false
-}
diff --git a/vendor/github.com/mattn/go-isatty/isatty_windows.go b/vendor/github.com/mattn/go-isatty/isatty_windows.go
deleted file mode 100644
index 1fa869154..000000000
--- a/vendor/github.com/mattn/go-isatty/isatty_windows.go
+++ /dev/null
@@ -1,125 +0,0 @@
-// +build windows
-// +build !appengine
-
-package isatty
-
-import (
- "errors"
- "strings"
- "syscall"
- "unicode/utf16"
- "unsafe"
-)
-
-const (
- objectNameInfo uintptr = 1
- fileNameInfo = 2
- fileTypePipe = 3
-)
-
-var (
- kernel32 = syscall.NewLazyDLL("kernel32.dll")
- ntdll = syscall.NewLazyDLL("ntdll.dll")
- procGetConsoleMode = kernel32.NewProc("GetConsoleMode")
- procGetFileInformationByHandleEx = kernel32.NewProc("GetFileInformationByHandleEx")
- procGetFileType = kernel32.NewProc("GetFileType")
- procNtQueryObject = ntdll.NewProc("NtQueryObject")
-)
-
-func init() {
- // Check if GetFileInformationByHandleEx is available.
- if procGetFileInformationByHandleEx.Find() != nil {
- procGetFileInformationByHandleEx = nil
- }
-}
-
-// IsTerminal return true if the file descriptor is terminal.
-func IsTerminal(fd uintptr) bool {
- var st uint32
- r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, fd, uintptr(unsafe.Pointer(&st)), 0)
- return r != 0 && e == 0
-}
-
-// Check pipe name is used for cygwin/msys2 pty.
-// Cygwin/MSYS2 PTY has a name like:
-// \{cygwin,msys}-XXXXXXXXXXXXXXXX-ptyN-{from,to}-master
-func isCygwinPipeName(name string) bool {
- token := strings.Split(name, "-")
- if len(token) < 5 {
- return false
- }
-
- if token[0] != `\msys` &&
- token[0] != `\cygwin` &&
- token[0] != `\Device\NamedPipe\msys` &&
- token[0] != `\Device\NamedPipe\cygwin` {
- return false
- }
-
- if token[1] == "" {
- return false
- }
-
- if !strings.HasPrefix(token[2], "pty") {
- return false
- }
-
- if token[3] != `from` && token[3] != `to` {
- return false
- }
-
- if token[4] != "master" {
- return false
- }
-
- return true
-}
-
-// getFileNameByHandle use the undocomented ntdll NtQueryObject to get file full name from file handler
-// since GetFileInformationByHandleEx is not avilable under windows Vista and still some old fashion
-// guys are using Windows XP, this is a workaround for those guys, it will also work on system from
-// Windows vista to 10
-// see https://stackoverflow.com/a/18792477 for details
-func getFileNameByHandle(fd uintptr) (string, error) {
- if procNtQueryObject == nil {
- return "", errors.New("ntdll.dll: NtQueryObject not supported")
- }
-
- var buf [4 + syscall.MAX_PATH]uint16
- var result int
- r, _, e := syscall.Syscall6(procNtQueryObject.Addr(), 5,
- fd, objectNameInfo, uintptr(unsafe.Pointer(&buf)), uintptr(2*len(buf)), uintptr(unsafe.Pointer(&result)), 0)
- if r != 0 {
- return "", e
- }
- return string(utf16.Decode(buf[4 : 4+buf[0]/2])), nil
-}
-
-// IsCygwinTerminal() return true if the file descriptor is a cygwin or msys2
-// terminal.
-func IsCygwinTerminal(fd uintptr) bool {
- if procGetFileInformationByHandleEx == nil {
- name, err := getFileNameByHandle(fd)
- if err != nil {
- return false
- }
- return isCygwinPipeName(name)
- }
-
- // Cygwin/msys's pty is a pipe.
- ft, _, e := syscall.Syscall(procGetFileType.Addr(), 1, fd, 0, 0)
- if ft != fileTypePipe || e != 0 {
- return false
- }
-
- var buf [2 + syscall.MAX_PATH]uint16
- r, _, e := syscall.Syscall6(procGetFileInformationByHandleEx.Addr(),
- 4, fd, fileNameInfo, uintptr(unsafe.Pointer(&buf)),
- uintptr(len(buf)*2), 0, 0)
- if r == 0 || e != 0 {
- return false
- }
-
- l := *(*uint32)(unsafe.Pointer(&buf))
- return isCygwinPipeName(string(utf16.Decode(buf[2 : 2+l/2])))
-}
diff --git a/vendor/github.com/mattn/go-isatty/renovate.json b/vendor/github.com/mattn/go-isatty/renovate.json
deleted file mode 100644
index 5ae9d96b7..000000000
--- a/vendor/github.com/mattn/go-isatty/renovate.json
+++ /dev/null
@@ -1,8 +0,0 @@
-{
- "extends": [
- "config:base"
- ],
- "postUpdateOptions": [
- "gomodTidy"
- ]
-}
diff --git a/vendor/github.com/moby/sys/mountinfo/LICENSE b/vendor/github.com/moby/sys/mountinfo/LICENSE
new file mode 100644
index 000000000..d64569567
--- /dev/null
+++ b/vendor/github.com/moby/sys/mountinfo/LICENSE
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/moby/sys/mountinfo/go.mod b/vendor/github.com/moby/sys/mountinfo/go.mod
new file mode 100644
index 000000000..10d9a15a6
--- /dev/null
+++ b/vendor/github.com/moby/sys/mountinfo/go.mod
@@ -0,0 +1,3 @@
+module github.com/moby/sys/mountinfo
+
+go 1.14
diff --git a/vendor/github.com/moby/sys/mountinfo/mountinfo.go b/vendor/github.com/moby/sys/mountinfo/mountinfo.go
new file mode 100644
index 000000000..136b14167
--- /dev/null
+++ b/vendor/github.com/moby/sys/mountinfo/mountinfo.go
@@ -0,0 +1,67 @@
+package mountinfo
+
+import "io"
+
+// GetMounts retrieves a list of mounts for the current running process,
+// with an optional filter applied (use nil for no filter).
+func GetMounts(f FilterFunc) ([]*Info, error) {
+ return parseMountTable(f)
+}
+
+// GetMountsFromReader retrieves a list of mounts from the
+// reader provided, with an optional filter applied (use nil
+// for no filter). This can be useful in tests or benchmarks
+// that provide fake mountinfo data.
+func GetMountsFromReader(reader io.Reader, f FilterFunc) ([]*Info, error) {
+ return parseInfoFile(reader, f)
+}
+
+// Mounted determines if a specified mountpoint has been mounted.
+// On Linux it looks at /proc/self/mountinfo.
+func Mounted(mountpoint string) (bool, error) {
+ entries, err := GetMounts(SingleEntryFilter(mountpoint))
+ if err != nil {
+ return false, err
+ }
+
+ return len(entries) > 0, nil
+}
+
+// Info reveals information about a particular mounted filesystem. This
+// struct is populated from the content in the /proc/<pid>/mountinfo file.
+type Info struct {
+ // ID is a unique identifier of the mount (may be reused after umount).
+ ID int
+
+ // Parent indicates the ID of the mount parent (or of self for the top of the
+ // mount tree).
+ Parent int
+
+ // Major indicates one half of the device ID which identifies the device class.
+ Major int
+
+ // Minor indicates one half of the device ID which identifies a specific
+ // instance of device.
+ Minor int
+
+ // Root of the mount within the filesystem.
+ Root string
+
+ // Mountpoint indicates the mount point relative to the process's root.
+ Mountpoint string
+
+ // Opts represents mount-specific options.
+ Opts string
+
+ // Optional represents optional fields.
+ Optional string
+
+ // Fstype indicates the type of filesystem, such as EXT3.
+ Fstype string
+
+ // Source indicates filesystem specific information or "none".
+ Source string
+
+ // VfsOpts represents per super block options.
+ VfsOpts string
+}
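A small usage sketch of the API added above (Linux behavior; the mount points are illustrative, and `PrefixFilter` is defined in the filters file added below):

```go
package main

import (
	"fmt"

	"github.com/moby/sys/mountinfo"
)

func main() {
	// Mounted uses SingleEntryFilter under the hood, stopping at the
	// first matching mountinfo entry.
	ok, err := mountinfo.Mounted("/proc")
	if err != nil {
		panic(err)
	}
	fmt.Println("/proc mounted:", ok)

	// List only entries under /sys using a prefix filter.
	mounts, err := mountinfo.GetMounts(mountinfo.PrefixFilter("/sys"))
	if err != nil {
		panic(err)
	}
	for _, m := range mounts {
		fmt.Printf("%s (%s)\n", m.Mountpoint, m.Fstype)
	}
}
```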
diff --git a/vendor/github.com/moby/sys/mountinfo/mountinfo_filters.go b/vendor/github.com/moby/sys/mountinfo/mountinfo_filters.go
new file mode 100644
index 000000000..795026465
--- /dev/null
+++ b/vendor/github.com/moby/sys/mountinfo/mountinfo_filters.go
@@ -0,0 +1,58 @@
+package mountinfo
+
+import "strings"
+
+// FilterFunc is a type defining a callback function for GetMounts(),
+// used to filter out mountinfo entries we're not interested in,
+// and/or stop further processing if we found what we wanted.
+//
+// It takes a pointer to the Info struct (not fully populated,
+// currently only Mountpoint, Fstype, Source, and (on Linux)
+// VfsOpts are filled in), and returns two booleans:
+//
+// - skip: true if the entry should be skipped
+// - stop: true if parsing should be stopped after the entry
+type FilterFunc func(*Info) (skip, stop bool)
+
+// PrefixFilter discards all entries whose mount points
+// do not start with a specific prefix
+func PrefixFilter(prefix string) FilterFunc {
+ return func(m *Info) (bool, bool) {
+ skip := !strings.HasPrefix(m.Mountpoint, prefix)
+ return skip, false
+ }
+}
+
+// SingleEntryFilter looks for a specific entry
+func SingleEntryFilter(mp string) FilterFunc {
+ return func(m *Info) (bool, bool) {
+ if m.Mountpoint == mp {
+ return false, true // don't skip, stop now
+ }
+ return true, false // skip, keep going
+ }
+}
+
+// ParentsFilter returns all entries whose mount points
+// can be parents of a path specified, discarding others.
+//
+// For example, given `/var/lib/docker/something`, entries
+// like `/var/lib/docker`, `/var` and `/` are returned.
+func ParentsFilter(path string) FilterFunc {
+ return func(m *Info) (bool, bool) {
+ skip := !strings.HasPrefix(path, m.Mountpoint)
+ return skip, false
+ }
+}
+
+// FstypeFilter returns all entries that match provided fstype(s).
+func FstypeFilter(fstype ...string) FilterFunc {
+ return func(m *Info) (bool, bool) {
+ for _, t := range fstype {
+ if m.Fstype == t {
+ return false, false // don't skip, keep going
+ }
+ }
+ return true, false // skip, keep going
+ }
+}
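As the comment on `GetMountsFromReader` suggests, filters can be exercised against fake mountinfo data without touching `/proc`. A sketch (the two mountinfo lines are fabricated test fixtures in the 11-field format parsed below):

```go
package main

import (
	"fmt"
	"strings"

	"github.com/moby/sys/mountinfo"
)

const fake = `36 35 98:0 /mnt1 /mnt2 rw,noatime master:1 - ext3 /dev/root rw,errors=continue
37 35 98:1 / /data rw shared:2 - xfs /dev/sdb1 rw
`

func main() {
	infos, err := mountinfo.GetMountsFromReader(
		strings.NewReader(fake),
		mountinfo.FstypeFilter("xfs"), // skip everything that is not xfs
	)
	if err != nil {
		panic(err)
	}
	for _, m := range infos {
		fmt.Println(m.Mountpoint, m.Fstype, m.Source) // /data xfs /dev/sdb1
	}
}
```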
diff --git a/vendor/github.com/containers/storage/pkg/mount/mountinfo_freebsd.go b/vendor/github.com/moby/sys/mountinfo/mountinfo_freebsd.go
index 4f32edcd9..a7dbb1b46 100644
--- a/vendor/github.com/containers/storage/pkg/mount/mountinfo_freebsd.go
+++ b/vendor/github.com/moby/sys/mountinfo/mountinfo_freebsd.go
@@ -1,4 +1,4 @@
-package mount
+package mountinfo
/*
#include <sys/param.h>
@@ -13,9 +13,8 @@ import (
"unsafe"
)
-// Parse /proc/self/mountinfo because comparing Dev and ino does not work from
-// bind mounts.
-func parseMountTable() ([]*Info, error) {
+// parseMountTable returns information about mounted filesystems
+func parseMountTable(filter FilterFunc) ([]*Info, error) {
var rawEntries *C.struct_statfs
count := int(C.getmntinfo(&rawEntries, C.MNT_WAIT))
@@ -32,10 +31,23 @@ func parseMountTable() ([]*Info, error) {
var out []*Info
for _, entry := range entries {
var mountinfo Info
+ var skip, stop bool
mountinfo.Mountpoint = C.GoString(&entry.f_mntonname[0])
- mountinfo.Source = C.GoString(&entry.f_mntfromname[0])
mountinfo.Fstype = C.GoString(&entry.f_fstypename[0])
+ mountinfo.Source = C.GoString(&entry.f_mntfromname[0])
+
+ if filter != nil {
+ // filter out entries we're not interested in
+ skip, stop = filter(&mountinfo)
+ if skip {
+ continue
+ }
+ }
+
out = append(out, &mountinfo)
+ if stop {
+ break
+ }
}
return out, nil
}
diff --git a/vendor/github.com/moby/sys/mountinfo/mountinfo_linux.go b/vendor/github.com/moby/sys/mountinfo/mountinfo_linux.go
new file mode 100644
index 000000000..2d630c8dc
--- /dev/null
+++ b/vendor/github.com/moby/sys/mountinfo/mountinfo_linux.go
@@ -0,0 +1,152 @@
+// +build go1.13
+
+package mountinfo
+
+import (
+ "bufio"
+ "fmt"
+ "io"
+ "os"
+ "strconv"
+ "strings"
+)
+
+func parseInfoFile(r io.Reader, filter FilterFunc) ([]*Info, error) {
+ s := bufio.NewScanner(r)
+ out := []*Info{}
+ var err error
+ for s.Scan() {
+ if err = s.Err(); err != nil {
+ return nil, err
+ }
+ /*
+ See http://man7.org/linux/man-pages/man5/proc.5.html
+
+ 36 35 98:0 /mnt1 /mnt2 rw,noatime master:1 - ext3 /dev/root rw,errors=continue
+ (1)(2)(3) (4) (5) (6) (7) (8) (9) (10) (11)
+
+ (1) mount ID: unique identifier of the mount (may be reused after umount)
+ (2) parent ID: ID of parent (or of self for the top of the mount tree)
+ (3) major:minor: value of st_dev for files on filesystem
+ (4) root: root of the mount within the filesystem
+ (5) mount point: mount point relative to the process's root
+ (6) mount options: per mount options
+ (7) optional fields: zero or more fields of the form "tag[:value]"
+ (8) separator: marks the end of the optional fields
+ (9) filesystem type: name of filesystem of the form "type[.subtype]"
+ (10) mount source: filesystem specific information or "none"
+ (11) super options: per super block options
+
+ In other words, we have:
+ * 6 mandatory fields (1)..(6)
+ * 0 or more optional fields (7)
+ * a separator field (8)
+ * 3 mandatory fields (9)..(11)
+ */
+
+ text := s.Text()
+ fields := strings.Split(text, " ")
+ numFields := len(fields)
+ if numFields < 10 {
+ // should be at least 10 fields
+ return nil, fmt.Errorf("Parsing '%s' failed: not enough fields (%d)", text, numFields)
+ }
+
+ // separator field
+ sepIdx := numFields - 4
+ // In Linux <= 3.9 mounting a cifs with spaces in a share
+ // name (like "//srv/My Docs") _may_ end up having a space
+ // in the last field of mountinfo (like "unc=//serv/My Docs").
+ // Since kernel 3.10-rc1, cifs option "unc=" is ignored,
+ // so spaces should not appear.
+ //
+ // Check for a separator, and work around the spaces bug
+ for fields[sepIdx] != "-" {
+ sepIdx--
+ if sepIdx == 5 {
+ return nil, fmt.Errorf("Parsing '%s' failed: missing - separator", text)
+ }
+ }
+
+ p := &Info{}
+
+ // Fill in the fields that a filter might check
+ p.Mountpoint, err = strconv.Unquote(`"` + fields[4] + `"`)
+ if err != nil {
+ return nil, fmt.Errorf("Parsing '%s' failed: unable to unquote mount point field: %w", fields[4], err)
+ }
+ p.Fstype = fields[sepIdx+1]
+ p.Source = fields[sepIdx+2]
+ p.VfsOpts = fields[sepIdx+3]
+
+ // Run a filter soon so we can skip parsing/adding entries
+ // the caller is not interested in
+ var skip, stop bool
+ if filter != nil {
+ skip, stop = filter(p)
+ if skip {
+ continue
+ }
+ }
+
+ // Fill in the rest of the fields
+
+ // ignore any numbers parsing errors, as there should not be any
+ p.ID, _ = strconv.Atoi(fields[0])
+ p.Parent, _ = strconv.Atoi(fields[1])
+ mm := strings.Split(fields[2], ":")
+ if len(mm) != 2 {
+ return nil, fmt.Errorf("Parsing '%s' failed: unexpected minor:major pair %s", text, mm)
+ }
+ p.Major, _ = strconv.Atoi(mm[0])
+ p.Minor, _ = strconv.Atoi(mm[1])
+
+ p.Root, err = strconv.Unquote(`"` + fields[3] + `"`)
+ if err != nil {
+ return nil, fmt.Errorf("Parsing '%s' failed: unable to unquote root field: %w", fields[3], err)
+ }
+
+ p.Opts = fields[5]
+
+ // zero or more optional fields
+ switch {
+ case sepIdx == 6:
+ // zero, do nothing
+ case sepIdx == 7:
+ p.Optional = fields[6]
+ default:
+ p.Optional = strings.Join(fields[6:sepIdx-1], " ")
+ }
+
+ out = append(out, p)
+ if stop {
+ break
+ }
+ }
+ return out, nil
+}
+
+// Parse /proc/self/mountinfo because comparing Dev and ino does not work from
+// bind mounts
+func parseMountTable(filter FilterFunc) ([]*Info, error) {
+ f, err := os.Open("/proc/self/mountinfo")
+ if err != nil {
+ return nil, err
+ }
+ defer f.Close()
+
+ return parseInfoFile(f, filter)
+}
+
+// PidMountInfo collects the mounts for a specific process ID. If the process
+// ID is unknown, it is better to use `GetMounts` which will inspect
+// "/proc/self/mountinfo" instead.
+func PidMountInfo(pid int) ([]*Info, error) {
+ f, err := os.Open(fmt.Sprintf("/proc/%d/mountinfo", pid))
+ if err != nil {
+ return nil, err
+ }
+ defer f.Close()
+
+ return parseInfoFile(f, nil)
+}
diff --git a/vendor/github.com/moby/sys/mountinfo/mountinfo_unsupported.go b/vendor/github.com/moby/sys/mountinfo/mountinfo_unsupported.go
new file mode 100644
index 000000000..dc1869211
--- /dev/null
+++ b/vendor/github.com/moby/sys/mountinfo/mountinfo_unsupported.go
@@ -0,0 +1,17 @@
+// +build !windows,!linux,!freebsd freebsd,!cgo
+
+package mountinfo
+
+import (
+ "fmt"
+ "io"
+ "runtime"
+)
+
+func parseMountTable(_ FilterFunc) ([]*Info, error) {
+ return nil, fmt.Errorf("mount.parseMountTable is not implemented on %s/%s", runtime.GOOS, runtime.GOARCH)
+}
+
+func parseInfoFile(_ io.Reader, f FilterFunc) ([]*Info, error) {
+ return parseMountTable(f)
+}
diff --git a/vendor/github.com/moby/sys/mountinfo/mountinfo_windows.go b/vendor/github.com/moby/sys/mountinfo/mountinfo_windows.go
new file mode 100644
index 000000000..69ffdc52b
--- /dev/null
+++ b/vendor/github.com/moby/sys/mountinfo/mountinfo_windows.go
@@ -0,0 +1,12 @@
+package mountinfo
+
+import "io"
+
+func parseMountTable(_ FilterFunc) ([]*Info, error) {
+ // Do NOT return an error!
+ return nil, nil
+}
+
+func parseInfoFile(_ io.Reader, f FilterFunc) ([]*Info, error) {
+ return parseMountTable(f)
+}
diff --git a/vendor/github.com/onsi/ginkgo/CHANGELOG.md b/vendor/github.com/onsi/ginkgo/CHANGELOG.md
index bdf18327e..6092fcb63 100644
--- a/vendor/github.com/onsi/ginkgo/CHANGELOG.md
+++ b/vendor/github.com/onsi/ginkgo/CHANGELOG.md
@@ -1,3 +1,8 @@
+## 1.14.1
+
+### Fixes
+- Discard exported method declaration when running ginkgo bootstrap (#558) [f4b0240]
+
## 1.14.0
### Features
diff --git a/vendor/github.com/onsi/ginkgo/README.md b/vendor/github.com/onsi/ginkgo/README.md
index fab114580..475e04994 100644
--- a/vendor/github.com/onsi/ginkgo/README.md
+++ b/vendor/github.com/onsi/ginkgo/README.md
@@ -80,10 +80,11 @@ Agouti allows you run WebDriver integration tests. Learn more about Agouti [her
You'll need the Go command-line tools. Follow the [installation instructions](https://golang.org/doc/install) if you don't have it installed.
### Global installation
-To install the Ginkgo command line interface into the `$PATH` (actually to `$GOBIN`):
+To install the Ginkgo command line interface:
```bash
go get -u github.com/onsi/ginkgo/ginkgo
```
+Note that this will install it to `$GOBIN`, which will need to be in the `$PATH` (or equivalent). Run `go help install` for more information.
### Go module ["tools package"](https://github.com/golang/go/issues/25922):
Create (or update) a file called `tools/tools.go` with the following contents:
@@ -93,7 +94,7 @@ Create (or update) a file called `tools/tools.go` with the following contents:
package tools
import (
- _ "github.com/onsi/ginkgo"
+ _ "github.com/onsi/ginkgo/ginkgo"
)
// This file imports packages that are used when running go generate, or used
diff --git a/vendor/github.com/onsi/ginkgo/config/config.go b/vendor/github.com/onsi/ginkgo/config/config.go
index 2ae48b804..feef2bcd6 100644
--- a/vendor/github.com/onsi/ginkgo/config/config.go
+++ b/vendor/github.com/onsi/ginkgo/config/config.go
@@ -20,7 +20,7 @@ import (
"fmt"
)
-const VERSION = "1.14.0"
+const VERSION = "1.14.1"
type GinkgoConfigType struct {
RandomSeed int64
diff --git a/vendor/github.com/onsi/ginkgo/ginkgo/nodot/nodot.go b/vendor/github.com/onsi/ginkgo/ginkgo/nodot/nodot.go
index 3f7237c60..c87b72165 100644
--- a/vendor/github.com/onsi/ginkgo/ginkgo/nodot/nodot.go
+++ b/vendor/github.com/onsi/ginkgo/ginkgo/nodot/nodot.go
@@ -186,7 +186,9 @@ func getExportedDeclarationsForFile(path string) ([]string, error) {
declarations = append(declarations, s.Names[0].Name)
}
case *ast.FuncDecl:
- declarations = append(declarations, x.Name.Name)
+ if x.Recv == nil {
+ declarations = append(declarations, x.Name.Name)
+ }
}
}
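The fix above skips `*ast.FuncDecl` nodes whose `Recv` field is non-nil, i.e. methods, so only package-level functions are collected. A self-contained illustration of that distinction using `go/parser` (the `sample` source is made up for the demo):

```go
package main

import (
	"fmt"
	"go/ast"
	"go/parser"
	"go/token"
)

const src = `package sample

func Exported() {}

type T struct{}

// Method has a receiver, so ast.FuncDecl.Recv is non-nil.
func (T) Method() {}
`

func main() {
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "sample.go", src, 0)
	if err != nil {
		panic(err)
	}
	for _, decl := range f.Decls {
		// Recv == nil distinguishes top-level functions from methods.
		if fd, ok := decl.(*ast.FuncDecl); ok && fd.Recv == nil {
			fmt.Println("top-level func:", fd.Name.Name) // prints only "Exported"
		}
	}
}
```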
diff --git a/vendor/github.com/onsi/gomega/CHANGELOG.md b/vendor/github.com/onsi/gomega/CHANGELOG.md
index 3aafdbcfc..0b6c2fb61 100644
--- a/vendor/github.com/onsi/gomega/CHANGELOG.md
+++ b/vendor/github.com/onsi/gomega/CHANGELOG.md
@@ -1,3 +1,8 @@
+## 1.10.2
+
+### Fixes
+- Add ExpectWithOffset, EventuallyWithOffset and ConsistentlyWithOffset to WithT (#391) [990941a]
+
## 1.10.1
### Fixes
diff --git a/vendor/github.com/onsi/gomega/gomega_dsl.go b/vendor/github.com/onsi/gomega/gomega_dsl.go
index 8ff9611d5..b416d20cc 100644
--- a/vendor/github.com/onsi/gomega/gomega_dsl.go
+++ b/vendor/github.com/onsi/gomega/gomega_dsl.go
@@ -24,7 +24,7 @@ import (
"github.com/onsi/gomega/types"
)
-const GOMEGA_VERSION = "1.10.1"
+const GOMEGA_VERSION = "1.10.2"
const nilFailHandlerPanic = `You are trying to make an assertion, but Gomega's fail handler is nil.
If you're using Ginkgo then you probably forgot to put your assertion in an It().
@@ -376,13 +376,13 @@ func NewGomegaWithT(t types.GomegaTestingT) *GomegaWithT {
return NewWithT(t)
}
-// Expect is used to make assertions. See documentation for Expect.
-func (g *WithT) Expect(actual interface{}, extra ...interface{}) Assertion {
- return assertion.New(actual, testingtsupport.BuildTestingTGomegaFailWrapper(g.t), 0, extra...)
+// ExpectWithOffset is used to make assertions. See documentation for ExpectWithOffset.
+func (g *WithT) ExpectWithOffset(offset int, actual interface{}, extra ...interface{}) Assertion {
+ return assertion.New(actual, testingtsupport.BuildTestingTGomegaFailWrapper(g.t), offset, extra...)
}
-// Eventually is used to make asynchronous assertions. See documentation for Eventually.
-func (g *WithT) Eventually(actual interface{}, intervals ...interface{}) AsyncAssertion {
+// EventuallyWithOffset is used to make asynchronous assertions. See documentation for EventuallyWithOffset.
+func (g *WithT) EventuallyWithOffset(offset int, actual interface{}, intervals ...interface{}) AsyncAssertion {
timeoutInterval := defaultEventuallyTimeout
pollingInterval := defaultEventuallyPollingInterval
if len(intervals) > 0 {
@@ -391,11 +391,11 @@ func (g *WithT) Eventually(actual interface{}, intervals ...interface{}) AsyncAs
if len(intervals) > 1 {
pollingInterval = toDuration(intervals[1])
}
- return asyncassertion.New(asyncassertion.AsyncAssertionTypeEventually, actual, testingtsupport.BuildTestingTGomegaFailWrapper(g.t), timeoutInterval, pollingInterval, 0)
+ return asyncassertion.New(asyncassertion.AsyncAssertionTypeEventually, actual, testingtsupport.BuildTestingTGomegaFailWrapper(g.t), timeoutInterval, pollingInterval, offset)
}
-// Consistently is used to make asynchronous assertions. See documentation for Consistently.
-func (g *WithT) Consistently(actual interface{}, intervals ...interface{}) AsyncAssertion {
+// ConsistentlyWithOffset is used to make asynchronous assertions. See documentation for ConsistentlyWithOffset.
+func (g *WithT) ConsistentlyWithOffset(offset int, actual interface{}, intervals ...interface{}) AsyncAssertion {
timeoutInterval := defaultConsistentlyDuration
pollingInterval := defaultConsistentlyPollingInterval
if len(intervals) > 0 {
@@ -404,7 +404,22 @@ func (g *WithT) Consistently(actual interface{}, intervals ...interface{}) Async
if len(intervals) > 1 {
pollingInterval = toDuration(intervals[1])
}
- return asyncassertion.New(asyncassertion.AsyncAssertionTypeConsistently, actual, testingtsupport.BuildTestingTGomegaFailWrapper(g.t), timeoutInterval, pollingInterval, 0)
+ return asyncassertion.New(asyncassertion.AsyncAssertionTypeConsistently, actual, testingtsupport.BuildTestingTGomegaFailWrapper(g.t), timeoutInterval, pollingInterval, offset)
+}
+
+// Expect is used to make assertions. See documentation for Expect.
+func (g *WithT) Expect(actual interface{}, extra ...interface{}) Assertion {
+ return g.ExpectWithOffset(0, actual, extra...)
+}
+
+// Eventually is used to make asynchronous assertions. See documentation for Eventually.
+func (g *WithT) Eventually(actual interface{}, intervals ...interface{}) AsyncAssertion {
+ return g.EventuallyWithOffset(0, actual, intervals...)
+}
+
+// Consistently is used to make asynchronous assertions. See documentation for Consistently.
+func (g *WithT) Consistently(actual interface{}, intervals ...interface{}) AsyncAssertion {
+ return g.ConsistentlyWithOffset(0, actual, intervals...)
}
func toDuration(input interface{}) time.Duration {
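A short sketch of how the `*WithT` offset variants added above are meant to be used: an offset of 1 makes a failure report point at the helper's caller instead of the helper itself (package and helper names are invented for the example):

```go
package mypkg_test

import (
	"testing"

	"github.com/onsi/gomega"
)

// expectPositive is a test helper; offset 1 attributes any failure
// to the line that called expectPositive, not to the assertion below.
func expectPositive(g *gomega.WithT, n int) {
	g.ExpectWithOffset(1, n).To(gomega.BeNumerically(">", 0))
}

func TestOffsets(t *testing.T) {
	g := gomega.NewWithT(t)
	expectPositive(g, 42)
}
```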
diff --git a/vendor/github.com/ulikunitz/xz/TODO.md b/vendor/github.com/ulikunitz/xz/TODO.md
index a4224ce14..84bd5dcbd 100644
--- a/vendor/github.com/ulikunitz/xz/TODO.md
+++ b/vendor/github.com/ulikunitz/xz/TODO.md
@@ -90,6 +90,11 @@
## Log
+## 2020-08-19
+
+Release v0.5.8 fixes issue
+[issue #35](https://github.com/ulikunitz/xz/issues/35).
+
### 2020-02-24
Release v0.5.7 supports the check-ID None and fixes
diff --git a/vendor/github.com/ulikunitz/xz/bits.go b/vendor/github.com/ulikunitz/xz/bits.go
index 364213dd9..dc8f32860 100644
--- a/vendor/github.com/ulikunitz/xz/bits.go
+++ b/vendor/github.com/ulikunitz/xz/bits.go
@@ -54,6 +54,8 @@ var errOverflowU64 = errors.New("xz: uvarint overflows 64-bit unsigned integer")
// readUvarint reads a uvarint from the given byte reader.
func readUvarint(r io.ByteReader) (x uint64, n int, err error) {
+ const maxUvarintLen = 10
+
var s uint
i := 0
for {
@@ -62,8 +64,11 @@ func readUvarint(r io.ByteReader) (x uint64, n int, err error) {
return x, i, err
}
i++
+ if i > maxUvarintLen {
+ return x, i, errOverflowU64
+ }
if b < 0x80 {
- if i > 10 || i == 10 && b > 1 {
+ if i == maxUvarintLen && b > 1 {
return x, i, errOverflowU64
}
return x | uint64(b)<<s, i, nil
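The added guard rejects any uvarint longer than 10 bytes before the final-byte check. The standard library uses the same bound (`binary.MaxVarintLen64 == 10`), which this snippet illustrates:

```go
package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

func main() {
	fmt.Println(binary.MaxVarintLen64) // 10: max bytes in a uint64 varint

	// A well-formed maximal value round-trips in exactly 10 bytes.
	var buf [binary.MaxVarintLen64]byte
	n := binary.PutUvarint(buf[:], ^uint64(0))
	fmt.Println(n) // 10

	// Eleven continuation bytes can never be valid; the stdlib reports
	// overflow with a negative byte count, matching the errOverflowU64
	// path in the patched reader.
	bad := bytes.Repeat([]byte{0x80}, 11)
	v, n2 := binary.Uvarint(bad)
	fmt.Println(v, n2) // 0 -11
}
```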
diff --git a/vendor/github.com/vbauerster/mpb/v5/bar.go b/vendor/github.com/vbauerster/mpb/v5/bar.go
index 9c28a07a8..358cb048d 100644
--- a/vendor/github.com/vbauerster/mpb/v5/bar.go
+++ b/vendor/github.com/vbauerster/mpb/v5/bar.go
@@ -86,7 +86,7 @@ func newBar(container *Progress, bs *bState) *Bar {
noPop: bs.noPop,
operateState: make(chan func(*bState)),
frameCh: make(chan io.Reader, 1),
- syncTableCh: make(chan [][]chan int),
+ syncTableCh: make(chan [][]chan int, 1),
completed: make(chan bool, 1),
done: make(chan struct{}),
cancel: cancel,
@@ -132,14 +132,18 @@ func (b *Bar) Current() int64 {
// Given default bar style is "[=>-]<+", refill rune is '+'.
// To set bar style use mpb.BarStyle(string) BarOption.
func (b *Bar) SetRefill(amount int64) {
- b.operateState <- func(s *bState) {
+ select {
+ case b.operateState <- func(s *bState) {
s.refill = amount
+ }:
+ case <-b.done:
}
}
// TraverseDecorators traverses all available decorators and calls cb func on each.
func (b *Bar) TraverseDecorators(cb func(decor.Decorator)) {
- b.operateState <- func(s *bState) {
+ select {
+ case b.operateState <- func(s *bState) {
for _, decorators := range [...][]decor.Decorator{
s.pDecorators,
s.aDecorators,
@@ -148,6 +152,8 @@ func (b *Bar) TraverseDecorators(cb func(decor.Decorator)) {
cb(extractBaseDecorator(d))
}
}
+ }:
+ case <-b.done:
}
}
@@ -174,6 +180,7 @@ func (b *Bar) SetTotal(total int64, complete bool) {
}
// SetCurrent sets progress' current to an arbitrary value.
+// Setting a negative value will cause a panic.
func (b *Bar) SetCurrent(current int64) {
select {
case b.operateState <- func(s *bState) {
@@ -305,11 +312,13 @@ func (b *Bar) render(tw int) {
defer func() {
// recovering if user defined decorator panics for example
if p := recover(); p != nil {
- s.extender = makePanicExtender(p)
+ if b.recoveredPanic == nil {
+ s.extender = makePanicExtender(p)
+ b.toShutdown = !b.toShutdown
+ b.recoveredPanic = p
+ }
frame, lines := s.extender(nil, s.reqWidth, stat)
b.extendedLines = lines
- b.toShutdown = !b.toShutdown
- b.recoveredPanic = p
b.frameCh <- frame
b.dlogger.Println(p)
}
@@ -348,12 +357,15 @@ func (b *Bar) subscribeDecorators() {
shutdownListeners = append(shutdownListeners, d)
}
})
- b.operateState <- func(s *bState) {
+ select {
+ case b.operateState <- func(s *bState) {
s.averageDecorators = averageDecorators
s.ewmaDecorators = ewmaDecorators
s.shutdownListeners = shutdownListeners
+ }:
+ b.hasEwmaDecorators = len(ewmaDecorators) != 0
+ case <-b.done:
}
- b.hasEwmaDecorators = len(ewmaDecorators) != 0
}
func (b *Bar) refreshTillShutdown() {
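The bar.go hunks above wrap every `operateState` send in a `select` against `b.done`, so API calls cannot block once the bar has shut down. A distilled sketch of that pattern with stand-in channel types (`sendOrDone` is not an mpb function):

```go
package main

import (
	"fmt"
	"time"
)

// sendOrDone delivers op to a worker, but gives up if done is closed,
// mirroring the select-on-done sends the patch adds to Bar methods.
func sendOrDone(ops chan<- func(), done <-chan struct{}, op func()) bool {
	select {
	case ops <- op:
		return true
	case <-done:
		return false
	}
}

func main() {
	ops := make(chan func())
	done := make(chan struct{})

	go func() { // worker: serve operations until shutdown
		for {
			select {
			case op := <-ops:
				op()
			case <-done:
				return
			}
		}
	}()

	fmt.Println("delivered:", sendOrDone(ops, done, func() { fmt.Println("applied") }))

	close(done)                       // simulate bar completion
	time.Sleep(10 * time.Millisecond) // let the worker exit
	fmt.Println("delivered:", sendOrDone(ops, done, func() {})) // false, no blocking
}
```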
diff --git a/vendor/github.com/vbauerster/mpb/v5/cwriter/util_bsd.go b/vendor/github.com/vbauerster/mpb/v5/cwriter/util_bsd.go
new file mode 100644
index 000000000..4e3564ece
--- /dev/null
+++ b/vendor/github.com/vbauerster/mpb/v5/cwriter/util_bsd.go
@@ -0,0 +1,7 @@
+// +build darwin dragonfly freebsd netbsd openbsd
+
+package cwriter
+
+import "golang.org/x/sys/unix"
+
+const ioctlReadTermios = unix.TIOCGETA
diff --git a/vendor/github.com/vbauerster/mpb/v5/cwriter/util_linux.go b/vendor/github.com/vbauerster/mpb/v5/cwriter/util_linux.go
new file mode 100644
index 000000000..253f12dd2
--- /dev/null
+++ b/vendor/github.com/vbauerster/mpb/v5/cwriter/util_linux.go
@@ -0,0 +1,7 @@
+// +build aix linux
+
+package cwriter
+
+import "golang.org/x/sys/unix"
+
+const ioctlReadTermios = unix.TCGETS
diff --git a/vendor/github.com/vbauerster/mpb/v5/cwriter/util_solaris.go b/vendor/github.com/vbauerster/mpb/v5/cwriter/util_solaris.go
new file mode 100644
index 000000000..4b29ff5c0
--- /dev/null
+++ b/vendor/github.com/vbauerster/mpb/v5/cwriter/util_solaris.go
@@ -0,0 +1,7 @@
+// +build solaris
+
+package cwriter
+
+import "golang.org/x/sys/unix"
+
+const ioctlReadTermios = unix.TCGETA
diff --git a/vendor/github.com/vbauerster/mpb/v5/cwriter/writer.go b/vendor/github.com/vbauerster/mpb/v5/cwriter/writer.go
index bb503360d..6f57875c6 100644
--- a/vendor/github.com/vbauerster/mpb/v5/cwriter/writer.go
+++ b/vendor/github.com/vbauerster/mpb/v5/cwriter/writer.go
@@ -3,17 +3,19 @@ package cwriter
import (
"bytes"
"errors"
- "fmt"
"io"
"os"
-
- "github.com/mattn/go-isatty"
+ "strconv"
)
// NotATTY not a TeleTYpewriter error.
var NotATTY = errors.New("not a terminal")
-var cuuAndEd = fmt.Sprintf("%c[%%dA%[1]c[J", 27)
+// http://ascii-table.com/ansi-escape-sequences.php
+const (
+ escOpen = "\x1b["
+ cuuAndEd = "A\x1b[J"
+)
// Writer is a buffered writer that updates the terminal. The
// contents of writer will be flushed when Flush is called.
@@ -21,7 +23,7 @@ type Writer struct {
out io.Writer
buf bytes.Buffer
lineCount int
- fd uintptr
+ fd int
isTerminal bool
}
@@ -29,8 +31,8 @@ type Writer struct {
func New(out io.Writer) *Writer {
w := &Writer{out: out}
if f, ok := out.(*os.File); ok {
- w.fd = f.Fd()
- w.isTerminal = isatty.IsTerminal(w.fd)
+ w.fd = int(f.Fd())
+ w.isTerminal = IsTerminal(w.fd)
}
return w
}
@@ -39,7 +41,10 @@ func New(out io.Writer) *Writer {
func (w *Writer) Flush(lineCount int) (err error) {
// some terminals interpret clear 0 lines as clear 1
if w.lineCount > 0 {
- w.clearLines()
+ err = w.clearLines()
+ if err != nil {
+ return
+ }
}
w.lineCount = lineCount
_, err = w.buf.WriteTo(w.out)
@@ -70,3 +75,10 @@ func (w *Writer) GetWidth() (int, error) {
tw, _, err := GetSize(w.fd)
return tw, err
}
+
+func (w *Writer) ansiCuuAndEd() (err error) {
+ buf := make([]byte, 8)
+ buf = strconv.AppendInt(buf[:copy(buf, escOpen)], int64(w.lineCount), 10)
+ _, err = w.out.Write(append(buf, cuuAndEd...))
+ return
+}
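`ansiCuuAndEd` above emits CUU (cursor up n lines, `ESC [ n A`) followed by ED (erase from cursor to end of screen, `ESC [ J`) without going through `fmt`. A standalone sketch of the same byte construction (`buildCuuAndEd` is a hypothetical helper):

```go
package main

import (
	"fmt"
	"strconv"
)

const (
	escOpen  = "\x1b["
	cuuAndEd = "A\x1b[J"
)

// buildCuuAndEd returns the escape sequence that moves the cursor up
// n lines (CUU) and erases from the cursor to the end of the screen (ED).
func buildCuuAndEd(n int) []byte {
	buf := make([]byte, 8)
	// copy returns 2 (len of escOpen); AppendInt writes the decimal count.
	buf = strconv.AppendInt(buf[:copy(buf, escOpen)], int64(n), 10)
	return append(buf, cuuAndEd...)
}

func main() {
	fmt.Printf("%q\n", buildCuuAndEd(3)) // "\x1b[3A\x1b[J"
}
```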
diff --git a/vendor/github.com/vbauerster/mpb/v5/cwriter/writer_posix.go b/vendor/github.com/vbauerster/mpb/v5/cwriter/writer_posix.go
index e836cec3a..f54a5d06b 100644
--- a/vendor/github.com/vbauerster/mpb/v5/cwriter/writer_posix.go
+++ b/vendor/github.com/vbauerster/mpb/v5/cwriter/writer_posix.go
@@ -3,20 +3,24 @@
package cwriter
import (
- "fmt"
-
"golang.org/x/sys/unix"
)
-func (w *Writer) clearLines() {
- fmt.Fprintf(w.out, cuuAndEd, w.lineCount)
+func (w *Writer) clearLines() error {
+ return w.ansiCuuAndEd()
}
// GetSize returns the dimensions of the given terminal.
-func GetSize(fd uintptr) (width, height int, err error) {
- ws, err := unix.IoctlGetWinsize(int(fd), unix.TIOCGWINSZ)
+func GetSize(fd int) (width, height int, err error) {
+ ws, err := unix.IoctlGetWinsize(fd, unix.TIOCGWINSZ)
if err != nil {
return -1, -1, err
}
return int(ws.Col), int(ws.Row), nil
}
+
+// IsTerminal returns whether the given file descriptor is a terminal.
+func IsTerminal(fd int) bool {
+ _, err := unix.IoctlGetTermios(fd, ioctlReadTermios)
+ return err == nil
+}
diff --git a/vendor/github.com/vbauerster/mpb/v5/cwriter/writer_windows.go b/vendor/github.com/vbauerster/mpb/v5/cwriter/writer_windows.go
index 7a3ed5bcc..1a69c81ac 100644
--- a/vendor/github.com/vbauerster/mpb/v5/cwriter/writer_windows.go
+++ b/vendor/github.com/vbauerster/mpb/v5/cwriter/writer_windows.go
@@ -3,67 +3,71 @@
package cwriter
import (
- "fmt"
- "syscall"
"unsafe"
+
+ "golang.org/x/sys/windows"
)
-var kernel32 = syscall.NewLazyDLL("kernel32.dll")
+var kernel32 = windows.NewLazySystemDLL("kernel32.dll")
var (
- procGetConsoleScreenBufferInfo = kernel32.NewProc("GetConsoleScreenBufferInfo")
procSetConsoleCursorPosition = kernel32.NewProc("SetConsoleCursorPosition")
procFillConsoleOutputCharacter = kernel32.NewProc("FillConsoleOutputCharacterW")
)
-type coord struct {
- x int16
- y int16
-}
-
-type smallRect struct {
- left int16
- top int16
- right int16
- bottom int16
-}
-
-type consoleScreenBufferInfo struct {
- size coord
- cursorPosition coord
- attributes uint16
- window smallRect
- maximumWindowSize coord
-}
-
-func (w *Writer) clearLines() {
+func (w *Writer) clearLines() error {
if !w.isTerminal {
- fmt.Fprintf(w.out, cuuAndEd, w.lineCount)
+ // hope it's cygwin or similar
+ return w.ansiCuuAndEd()
}
- info := new(consoleScreenBufferInfo)
- procGetConsoleScreenBufferInfo.Call(w.fd, uintptr(unsafe.Pointer(info)))
+ var info windows.ConsoleScreenBufferInfo
+ if err := windows.GetConsoleScreenBufferInfo(windows.Handle(w.fd), &info); err != nil {
+ return err
+ }
- info.cursorPosition.y -= int16(w.lineCount)
- if info.cursorPosition.y < 0 {
- info.cursorPosition.y = 0
+ info.CursorPosition.Y -= int16(w.lineCount)
+ if info.CursorPosition.Y < 0 {
+ info.CursorPosition.Y = 0
}
- procSetConsoleCursorPosition.Call(w.fd, uintptr(uint32(uint16(info.cursorPosition.y))<<16|uint32(uint16(info.cursorPosition.x))))
+ _, _, _ = procSetConsoleCursorPosition.Call(
+ uintptr(w.fd),
+ uintptr(uint32(uint16(info.CursorPosition.Y))<<16|uint32(uint16(info.CursorPosition.X))),
+ )
// clear the lines
- cursor := &coord{
- x: info.window.left,
- y: info.cursorPosition.y,
+ cursor := &windows.Coord{
+ X: info.Window.Left,
+ Y: info.CursorPosition.Y,
}
- count := uint32(info.size.x) * uint32(w.lineCount)
- procFillConsoleOutputCharacter.Call(w.fd, uintptr(' '), uintptr(count), *(*uintptr)(unsafe.Pointer(cursor)), uintptr(unsafe.Pointer(new(uint32))))
+ count := uint32(info.Size.X) * uint32(w.lineCount)
+ _, _, _ = procFillConsoleOutputCharacter.Call(
+ uintptr(w.fd),
+ uintptr(' '),
+ uintptr(count),
+ *(*uintptr)(unsafe.Pointer(cursor)),
+ uintptr(unsafe.Pointer(new(uint32))),
+ )
+ return nil
}
// GetSize returns the visible dimensions of the given terminal.
//
// These dimensions don't include any scrollback buffer height.
-func GetSize(fd uintptr) (width, height int, err error) {
- info := new(consoleScreenBufferInfo)
- procGetConsoleScreenBufferInfo.Call(fd, uintptr(unsafe.Pointer(info)))
- return int(info.window.right - info.window.left), int(info.window.bottom - info.window.top), nil
+func GetSize(fd int) (width, height int, err error) {
+ var info windows.ConsoleScreenBufferInfo
+ if err := windows.GetConsoleScreenBufferInfo(windows.Handle(fd), &info); err != nil {
+ return 0, 0, err
+ }
+ // terminal.GetSize from crypto/ssh adds "+ 1" to both width and height:
+ // https://go.googlesource.com/crypto/+/refs/heads/release-branch.go1.14/ssh/terminal/util_windows.go#75
+ // but it looks like this was the root cause of issue #66, so removing both "+ 1" fixed it.
+ return int(info.Window.Right - info.Window.Left), int(info.Window.Bottom - info.Window.Top), nil
+}
+
+// IsTerminal returns whether the given file descriptor is a terminal.
+func IsTerminal(fd int) bool {
+ var st uint32
+ err := windows.GetConsoleMode(windows.Handle(fd), &st)
+ return err == nil
}
diff --git a/vendor/github.com/vbauerster/mpb/v5/decor/counters.go b/vendor/github.com/vbauerster/mpb/v5/decor/counters.go
index 010ec371a..4a5343d41 100644
--- a/vendor/github.com/vbauerster/mpb/v5/decor/counters.go
+++ b/vendor/github.com/vbauerster/mpb/v5/decor/counters.go
@@ -2,6 +2,7 @@ package decor
import (
"fmt"
+ "strings"
)
const (
@@ -31,7 +32,7 @@ func CountersKiloByte(pairFmt string, wcc ...WC) Decorator {
//
// `unit` one of [0|UnitKiB|UnitKB] zero for no unit
//
-// `pairFmt` printf compatible verbs for current and total, like "%f" or "%d"
+// `pairFmt` printf compatible verbs for current and total pair
//
// `wcc` optional WC config
//
@@ -43,25 +44,200 @@ func CountersKiloByte(pairFmt string, wcc ...WC) Decorator {
// pairFmt="% d / % d" output: "1 MB / 12 MB"
//
func Counters(unit int, pairFmt string, wcc ...WC) Decorator {
- return Any(chooseSizeProducer(unit, pairFmt), wcc...)
+ producer := func(unit int, pairFmt string) DecorFunc {
+ if pairFmt == "" {
+ pairFmt = "%d / %d"
+ } else if strings.Count(pairFmt, "%") != 2 {
+ panic("expected pairFmt with exactly 2 verbs")
+ }
+ switch unit {
+ case UnitKiB:
+ return func(s Statistics) string {
+ return fmt.Sprintf(pairFmt, SizeB1024(s.Current), SizeB1024(s.Total))
+ }
+ case UnitKB:
+ return func(s Statistics) string {
+ return fmt.Sprintf(pairFmt, SizeB1000(s.Current), SizeB1000(s.Total))
+ }
+ default:
+ return func(s Statistics) string {
+ return fmt.Sprintf(pairFmt, s.Current, s.Total)
+ }
+ }
+ }
+ return Any(producer(unit, pairFmt), wcc...)
+}
+
+// TotalNoUnit is a wrapper around Total with no unit param.
+func TotalNoUnit(format string, wcc ...WC) Decorator {
+ return Total(0, format, wcc...)
+}
+
+// TotalKibiByte is a wrapper around Total with predefined unit
+// UnitKiB (bytes/1024).
+func TotalKibiByte(format string, wcc ...WC) Decorator {
+ return Total(UnitKiB, format, wcc...)
}
-func chooseSizeProducer(unit int, format string) DecorFunc {
- if format == "" {
- format = "%d / %d"
+// TotalKiloByte is a wrapper around Total with predefined unit
+// UnitKB (bytes/1000).
+func TotalKiloByte(format string, wcc ...WC) Decorator {
+ return Total(UnitKB, format, wcc...)
+}
+
+// Total decorator with dynamic unit measure adjustment.
+//
+// `unit` one of [0|UnitKiB|UnitKB] zero for no unit
+//
+// `format` printf compatible verb for Total
+//
+// `wcc` optional WC config
+//
+// format example if unit=UnitKiB:
+//
+// format="%.1f" output: "12.0MiB"
+// format="% .1f" output: "12.0 MiB"
+// format="%d" output: "12MiB"
+// format="% d" output: "12 MiB"
+//
+func Total(unit int, format string, wcc ...WC) Decorator {
+ producer := func(unit int, format string) DecorFunc {
+ if format == "" {
+ format = "%d"
+ } else if strings.Count(format, "%") != 1 {
+ panic("expected format with exactly 1 verb")
+ }
+
+ switch unit {
+ case UnitKiB:
+ return func(s Statistics) string {
+ return fmt.Sprintf(format, SizeB1024(s.Total))
+ }
+ case UnitKB:
+ return func(s Statistics) string {
+ return fmt.Sprintf(format, SizeB1000(s.Total))
+ }
+ default:
+ return func(s Statistics) string {
+ return fmt.Sprintf(format, s.Total)
+ }
+ }
}
- switch unit {
- case UnitKiB:
- return func(s Statistics) string {
- return fmt.Sprintf(format, SizeB1024(s.Current), SizeB1024(s.Total))
+ return Any(producer(unit, format), wcc...)
+}
+
+// CurrentNoUnit is a wrapper around Current with no unit param.
+func CurrentNoUnit(format string, wcc ...WC) Decorator {
+ return Current(0, format, wcc...)
+}
+
+// CurrentKibiByte is a wrapper around Current with predefined unit
+// UnitKiB (bytes/1024).
+func CurrentKibiByte(format string, wcc ...WC) Decorator {
+ return Current(UnitKiB, format, wcc...)
+}
+
+// CurrentKiloByte is a wrapper around Current with predefined unit
+// UnitKB (bytes/1000).
+func CurrentKiloByte(format string, wcc ...WC) Decorator {
+ return Current(UnitKB, format, wcc...)
+}
+
+// Current decorator with dynamic unit measure adjustment.
+//
+// `unit` one of [0|UnitKiB|UnitKB] zero for no unit
+//
+// `format` printf compatible verb for Current
+//
+// `wcc` optional WC config
+//
+// format example if unit=UnitKiB:
+//
+// format="%.1f" output: "12.0MiB"
+// format="% .1f" output: "12.0 MiB"
+// format="%d" output: "12MiB"
+// format="% d" output: "12 MiB"
+//
+func Current(unit int, format string, wcc ...WC) Decorator {
+ producer := func(unit int, format string) DecorFunc {
+ if format == "" {
+ format = "%d"
+ } else if strings.Count(format, "%") != 1 {
+ panic("expected format with exactly 1 verb")
}
- case UnitKB:
- return func(s Statistics) string {
- return fmt.Sprintf(format, SizeB1000(s.Current), SizeB1000(s.Total))
+
+ switch unit {
+ case UnitKiB:
+ return func(s Statistics) string {
+ return fmt.Sprintf(format, SizeB1024(s.Current))
+ }
+ case UnitKB:
+ return func(s Statistics) string {
+ return fmt.Sprintf(format, SizeB1000(s.Current))
+ }
+ default:
+ return func(s Statistics) string {
+ return fmt.Sprintf(format, s.Current)
+ }
}
- default:
- return func(s Statistics) string {
- return fmt.Sprintf(format, s.Current, s.Total)
+ }
+ return Any(producer(unit, format), wcc...)
+}
+
+// InvertedCurrentNoUnit is a wrapper around InvertedCurrent with no unit param.
+func InvertedCurrentNoUnit(format string, wcc ...WC) Decorator {
+ return InvertedCurrent(0, format, wcc...)
+}
+
+// InvertedCurrentKibiByte is a wrapper around InvertedCurrent with predefined unit
+// UnitKiB (bytes/1024).
+func InvertedCurrentKibiByte(format string, wcc ...WC) Decorator {
+ return InvertedCurrent(UnitKiB, format, wcc...)
+}
+
+// InvertedCurrentKiloByte is a wrapper around InvertedCurrent with predefined unit
+// UnitKB (bytes/1000).
+func InvertedCurrentKiloByte(format string, wcc ...WC) Decorator {
+ return InvertedCurrent(UnitKB, format, wcc...)
+}
+
+// InvertedCurrent decorator with dynamic unit measure adjustment.
+//
+// `unit` one of [0|UnitKiB|UnitKB] zero for no unit
+//
+// `format` printf compatible verb for InvertedCurrent
+//
+// `wcc` optional WC config
+//
+// format example if unit=UnitKiB:
+//
+// format="%.1f" output: "12.0MiB"
+// format="% .1f" output: "12.0 MiB"
+// format="%d" output: "12MiB"
+// format="% d" output: "12 MiB"
+//
+func InvertedCurrent(unit int, format string, wcc ...WC) Decorator {
+ producer := func(unit int, format string) DecorFunc {
+ if format == "" {
+ format = "%d"
+ } else if strings.Count(format, "%") != 1 {
+ panic("expected format with exactly 1 verb")
+ }
+
+ switch unit {
+ case UnitKiB:
+ return func(s Statistics) string {
+ return fmt.Sprintf(format, SizeB1024(s.Total-s.Current))
+ }
+ case UnitKB:
+ return func(s Statistics) string {
+ return fmt.Sprintf(format, SizeB1000(s.Total-s.Current))
+ }
+ default:
+ return func(s Statistics) string {
+ return fmt.Sprintf(format, s.Total-s.Current)
+ }
}
}
+ return Any(producer(unit, format), wcc...)
}
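Taken together, counters.go now exposes Counters, Total, Current, and InvertedCurrent decorator families, each panicking early when the format string carries the wrong number of %-verbs (two for the pair forms, one otherwise). A usage sketch, assuming the mpb/v5 API vendored here (the bar size and sleep are illustrative):

    package main

    import (
        "time"

        "github.com/vbauerster/mpb/v5"
        "github.com/vbauerster/mpb/v5/decor"
    )

    func main() {
        p := mpb.New()
        total := int64(64 * 1024 * 1024)
        bar := p.AddBar(total,
            mpb.PrependDecorators(
                // Two verbs for the current/total pair, e.g. "32.0 MiB / 64.0 MiB".
                decor.CountersKibiByte("% .1f / % .1f"),
            ),
            mpb.AppendDecorators(
                // One verb: remaining bytes via the new InvertedCurrent decorator.
                decor.InvertedCurrentKibiByte("% .1f left"),
            ),
        )
        for i := 0; i < 64; i++ {
            bar.IncrBy(1024 * 1024)
            time.Sleep(10 * time.Millisecond)
        }
        p.Wait()
    }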
diff --git a/vendor/github.com/vbauerster/mpb/v5/go.mod b/vendor/github.com/vbauerster/mpb/v5/go.mod
index 389a19d54..642bf0a5a 100644
--- a/vendor/github.com/vbauerster/mpb/v5/go.mod
+++ b/vendor/github.com/vbauerster/mpb/v5/go.mod
@@ -3,9 +3,8 @@ module github.com/vbauerster/mpb/v5
require (
github.com/VividCortex/ewma v1.1.1
github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d
- github.com/mattn/go-isatty v0.0.12
github.com/mattn/go-runewidth v0.0.9
- golang.org/x/sys v0.0.0-20200519105757-fe76b779f299
+ golang.org/x/sys v0.0.0-20200810151505-1b9f1253b3ed
)
go 1.14
diff --git a/vendor/github.com/vbauerster/mpb/v5/go.sum b/vendor/github.com/vbauerster/mpb/v5/go.sum
index dcaa8c553..7ad08f141 100644
--- a/vendor/github.com/vbauerster/mpb/v5/go.sum
+++ b/vendor/github.com/vbauerster/mpb/v5/go.sum
@@ -2,10 +2,7 @@ github.com/VividCortex/ewma v1.1.1 h1:MnEK4VOv6n0RSY4vtRe3h11qjxL3+t0B8yOL8iMXdc
github.com/VividCortex/ewma v1.1.1/go.mod h1:2Tkkvm3sRDVXaiyucHiACn4cqf7DpdyLvmxzcbUokwA=
github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d h1:licZJFw2RwpHMqeKTCYkitsPqHNxTmd4SNR5r94FGM8=
github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d/go.mod h1:asat636LX7Bqt5lYEZ27JNDcqxfjdBQuJ/MM4CN/Lzo=
-github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHXY=
-github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
github.com/mattn/go-runewidth v0.0.9 h1:Lm995f3rfxdpd6TSmuVCHVb/QhupuXlYr8sCI/QdE+0=
github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
-golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200519105757-fe76b779f299 h1:DYfZAGf2WMFjMxbgTjaC+2HC7NkNAQs+6Q8b9WEB/F4=
-golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200810151505-1b9f1253b3ed h1:WBkVNH1zd9jg/dK4HCM4lNANnmd12EHC9z+LmcCG4ns=
+golang.org/x/sys v0.0.0-20200810151505-1b9f1253b3ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
diff --git a/vendor/golang.org/x/sys/cpu/cpu.go b/vendor/golang.org/x/sys/cpu/cpu.go
index e44deb757..5cce25ed9 100644
--- a/vendor/golang.org/x/sys/cpu/cpu.go
+++ b/vendor/golang.org/x/sys/cpu/cpu.go
@@ -6,6 +6,11 @@
// various CPU architectures.
package cpu
+import (
+ "os"
+ "strings"
+)
+
// Initialized reports whether the CPU features were initialized.
//
// For some GOOS/GOARCH combinations initialization of the CPU features depends
@@ -169,3 +174,94 @@ var S390X struct {
HasVXE bool // vector-enhancements facility 1
_ CacheLinePad
}
+
+func init() {
+ archInit()
+ initOptions()
+ processOptions()
+}
+
+// options contains the cpu debug options that can be used in GODEBUG.
+// Options are arch dependent and are added by the arch specific initOptions functions.
+// Features that are mandatory for the specific GOARCH should have the Required field set
+// (e.g. SSE2 on amd64).
+var options []option
+
+// Option names should be lower case, e.g. avx instead of AVX.
+type option struct {
+ Name string
+ Feature *bool
+ Specified bool // whether feature value was specified in GODEBUG
+ Enable bool // whether feature should be enabled
+ Required bool // whether feature is mandatory and can not be disabled
+}
+
+func processOptions() {
+ env := os.Getenv("GODEBUG")
+field:
+ for env != "" {
+ field := ""
+ i := strings.IndexByte(env, ',')
+ if i < 0 {
+ field, env = env, ""
+ } else {
+ field, env = env[:i], env[i+1:]
+ }
+ if len(field) < 4 || field[:4] != "cpu." {
+ continue
+ }
+ i = strings.IndexByte(field, '=')
+ if i < 0 {
+ print("GODEBUG sys/cpu: no value specified for \"", field, "\"\n")
+ continue
+ }
+ key, value := field[4:i], field[i+1:] // e.g. "SSE2", "on"
+
+ var enable bool
+ switch value {
+ case "on":
+ enable = true
+ case "off":
+ enable = false
+ default:
+ print("GODEBUG sys/cpu: value \"", value, "\" not supported for cpu option \"", key, "\"\n")
+ continue field
+ }
+
+ if key == "all" {
+ for i := range options {
+ options[i].Specified = true
+ options[i].Enable = enable || options[i].Required
+ }
+ continue field
+ }
+
+ for i := range options {
+ if options[i].Name == key {
+ options[i].Specified = true
+ options[i].Enable = enable
+ continue field
+ }
+ }
+
+ print("GODEBUG sys/cpu: unknown cpu feature \"", key, "\"\n")
+ }
+
+ for _, o := range options {
+ if !o.Specified {
+ continue
+ }
+
+ if o.Enable && !*o.Feature {
+ print("GODEBUG sys/cpu: can not enable \"", o.Name, "\", missing CPU support\n")
+ continue
+ }
+
+ if !o.Enable && o.Required {
+ print("GODEBUG sys/cpu: can not disable \"", o.Name, "\", required CPU feature\n")
+ continue
+ }
+
+ *o.Feature = o.Enable
+ }
+}
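The new init/archInit/initOptions split means individual CPU features can be inspected and overridden at run time through GODEBUG, without recompiling. A small sketch of the caller side (the program name and printed labels are illustrative; the cpu.X86 fields are real package variables):

    package main

    import (
        "fmt"

        "golang.org/x/sys/cpu"
    )

    func main() {
        // Run as: GODEBUG=cpu.avx2=off ./prog
        // processOptions runs from the package init and clears HasAVX2.
        fmt.Println("avx2:", cpu.X86.HasAVX2)

        // SSE2 is marked Required on amd64, so GODEBUG=cpu.sse2=off is
        // rejected with a "required CPU feature" message instead.
        fmt.Println("sse2:", cpu.X86.HasSSE2)

        // GODEBUG=cpu.all=off disables every non-Required feature at once.
    }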
diff --git a/vendor/golang.org/x/sys/cpu/cpu_aix.go b/vendor/golang.org/x/sys/cpu/cpu_aix.go
index da2989668..464a209cf 100644
--- a/vendor/golang.org/x/sys/cpu/cpu_aix.go
+++ b/vendor/golang.org/x/sys/cpu/cpu_aix.go
@@ -6,8 +6,6 @@
package cpu
-const cacheLineSize = 128
-
const (
// getsystemcfg constants
_SC_IMPL = 2
@@ -15,7 +13,7 @@ const (
_IMPL_POWER9 = 0x20000
)
-func init() {
+func archInit() {
impl := getsystemcfg(_SC_IMPL)
if impl&_IMPL_POWER8 != 0 {
PPC64.IsPOWER8 = true
diff --git a/vendor/golang.org/x/sys/cpu/cpu_arm.go b/vendor/golang.org/x/sys/cpu/cpu_arm.go
index 981af6818..301b752e9 100644
--- a/vendor/golang.org/x/sys/cpu/cpu_arm.go
+++ b/vendor/golang.org/x/sys/cpu/cpu_arm.go
@@ -38,3 +38,36 @@ const (
hwcap2_SHA2 = 1 << 3
hwcap2_CRC32 = 1 << 4
)
+
+func initOptions() {
+ options = []option{
+ {Name: "pmull", Feature: &ARM.HasPMULL},
+ {Name: "sha1", Feature: &ARM.HasSHA1},
+ {Name: "sha2", Feature: &ARM.HasSHA2},
+ {Name: "swp", Feature: &ARM.HasSWP},
+ {Name: "thumb", Feature: &ARM.HasTHUMB},
+ {Name: "thumbee", Feature: &ARM.HasTHUMBEE},
+ {Name: "tls", Feature: &ARM.HasTLS},
+ {Name: "vfp", Feature: &ARM.HasVFP},
+ {Name: "vfpd32", Feature: &ARM.HasVFPD32},
+ {Name: "vfpv3", Feature: &ARM.HasVFPv3},
+ {Name: "vfpv3d16", Feature: &ARM.HasVFPv3D16},
+ {Name: "vfpv4", Feature: &ARM.HasVFPv4},
+ {Name: "half", Feature: &ARM.HasHALF},
+ {Name: "26bit", Feature: &ARM.Has26BIT},
+ {Name: "fastmul", Feature: &ARM.HasFASTMUL},
+ {Name: "fpa", Feature: &ARM.HasFPA},
+ {Name: "edsp", Feature: &ARM.HasEDSP},
+ {Name: "java", Feature: &ARM.HasJAVA},
+ {Name: "iwmmxt", Feature: &ARM.HasIWMMXT},
+ {Name: "crunch", Feature: &ARM.HasCRUNCH},
+ {Name: "neon", Feature: &ARM.HasNEON},
+ {Name: "idivt", Feature: &ARM.HasIDIVT},
+ {Name: "idiva", Feature: &ARM.HasIDIVA},
+ {Name: "lpae", Feature: &ARM.HasLPAE},
+ {Name: "evtstrm", Feature: &ARM.HasEVTSTRM},
+ {Name: "aes", Feature: &ARM.HasAES},
+ {Name: "crc32", Feature: &ARM.HasCRC32},
+ }
+
+}
diff --git a/vendor/golang.org/x/sys/cpu/cpu_arm64.go b/vendor/golang.org/x/sys/cpu/cpu_arm64.go
index 7bcb36c7b..2d9002438 100644
--- a/vendor/golang.org/x/sys/cpu/cpu_arm64.go
+++ b/vendor/golang.org/x/sys/cpu/cpu_arm64.go
@@ -8,7 +8,36 @@ import "runtime"
const cacheLineSize = 64
-func init() {
+func initOptions() {
+ options = []option{
+ {Name: "fp", Feature: &ARM64.HasFP},
+ {Name: "asimd", Feature: &ARM64.HasASIMD},
+ {Name: "evstrm", Feature: &ARM64.HasEVTSTRM},
+ {Name: "aes", Feature: &ARM64.HasAES},
+ {Name: "fphp", Feature: &ARM64.HasFPHP},
+ {Name: "jscvt", Feature: &ARM64.HasJSCVT},
+ {Name: "lrcpc", Feature: &ARM64.HasLRCPC},
+ {Name: "pmull", Feature: &ARM64.HasPMULL},
+ {Name: "sha1", Feature: &ARM64.HasSHA1},
+ {Name: "sha2", Feature: &ARM64.HasSHA2},
+ {Name: "sha3", Feature: &ARM64.HasSHA3},
+ {Name: "sha512", Feature: &ARM64.HasSHA512},
+ {Name: "sm3", Feature: &ARM64.HasSM3},
+ {Name: "sm4", Feature: &ARM64.HasSM4},
+ {Name: "sve", Feature: &ARM64.HasSVE},
+ {Name: "crc32", Feature: &ARM64.HasCRC32},
+ {Name: "atomics", Feature: &ARM64.HasATOMICS},
+ {Name: "asimdhp", Feature: &ARM64.HasASIMDHP},
+ {Name: "cpuid", Feature: &ARM64.HasCPUID},
+ {Name: "asimrdm", Feature: &ARM64.HasASIMDRDM},
+ {Name: "fcma", Feature: &ARM64.HasFCMA},
+ {Name: "dcpop", Feature: &ARM64.HasDCPOP},
+ {Name: "asimddp", Feature: &ARM64.HasASIMDDP},
+ {Name: "asimdfhm", Feature: &ARM64.HasASIMDFHM},
+ }
+}
+
+func archInit() {
switch runtime.GOOS {
case "android", "darwin", "netbsd":
// Android and iOS don't seem to allow reading these registers.
diff --git a/vendor/golang.org/x/sys/cpu/cpu_linux.go b/vendor/golang.org/x/sys/cpu/cpu_linux.go
index fe139182c..6fc874f7f 100644
--- a/vendor/golang.org/x/sys/cpu/cpu_linux.go
+++ b/vendor/golang.org/x/sys/cpu/cpu_linux.go
@@ -6,7 +6,7 @@
package cpu
-func init() {
+func archInit() {
if err := readHWCAP(); err != nil {
return
}
diff --git a/vendor/golang.org/x/sys/cpu/cpu_linux_ppc64x.go b/vendor/golang.org/x/sys/cpu/cpu_linux_ppc64x.go
index 6c8d975d4..99f8a6399 100644
--- a/vendor/golang.org/x/sys/cpu/cpu_linux_ppc64x.go
+++ b/vendor/golang.org/x/sys/cpu/cpu_linux_ppc64x.go
@@ -7,8 +7,6 @@
package cpu
-const cacheLineSize = 128
-
// HWCAP/HWCAP2 bits. These are exposed by the kernel.
const (
// ISA Level
diff --git a/vendor/golang.org/x/sys/cpu/cpu_linux_s390x.go b/vendor/golang.org/x/sys/cpu/cpu_linux_s390x.go
index d579eaef4..b88d6b8f6 100644
--- a/vendor/golang.org/x/sys/cpu/cpu_linux_s390x.go
+++ b/vendor/golang.org/x/sys/cpu/cpu_linux_s390x.go
@@ -4,8 +4,6 @@
package cpu
-const cacheLineSize = 256
-
const (
// bit mask values from /usr/include/bits/hwcap.h
hwcap_ZARCH = 2
diff --git a/vendor/golang.org/x/sys/cpu/cpu_mips64x.go b/vendor/golang.org/x/sys/cpu/cpu_mips64x.go
index 6165f1212..57b5b677d 100644
--- a/vendor/golang.org/x/sys/cpu/cpu_mips64x.go
+++ b/vendor/golang.org/x/sys/cpu/cpu_mips64x.go
@@ -7,3 +7,9 @@
package cpu
const cacheLineSize = 32
+
+func initOptions() {
+ options = []option{
+ {Name: "msa", Feature: &MIPS64X.HasMSA},
+ }
+}
diff --git a/vendor/golang.org/x/sys/cpu/cpu_mipsx.go b/vendor/golang.org/x/sys/cpu/cpu_mipsx.go
index 1269eee88..cfc1946b7 100644
--- a/vendor/golang.org/x/sys/cpu/cpu_mipsx.go
+++ b/vendor/golang.org/x/sys/cpu/cpu_mipsx.go
@@ -7,3 +7,5 @@
package cpu
const cacheLineSize = 32
+
+func initOptions() {}
diff --git a/vendor/golang.org/x/sys/cpu/cpu_other_arm.go b/vendor/golang.org/x/sys/cpu/cpu_other_arm.go
new file mode 100644
index 000000000..b412efc1b
--- /dev/null
+++ b/vendor/golang.org/x/sys/cpu/cpu_other_arm.go
@@ -0,0 +1,9 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !linux,arm
+
+package cpu
+
+func archInit() {}
diff --git a/vendor/golang.org/x/sys/cpu/cpu_ppc64x.go b/vendor/golang.org/x/sys/cpu/cpu_ppc64x.go
new file mode 100644
index 000000000..d28d675b5
--- /dev/null
+++ b/vendor/golang.org/x/sys/cpu/cpu_ppc64x.go
@@ -0,0 +1,16 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build ppc64 ppc64le
+
+package cpu
+
+const cacheLineSize = 128
+
+func initOptions() {
+ options = []option{
+ {Name: "darn", Feature: &PPC64.HasDARN},
+ {Name: "scv", Feature: &PPC64.HasSCV},
+ }
+}
diff --git a/vendor/golang.org/x/sys/cpu/cpu_riscv64.go b/vendor/golang.org/x/sys/cpu/cpu_riscv64.go
index efe2b7a84..8b08de341 100644
--- a/vendor/golang.org/x/sys/cpu/cpu_riscv64.go
+++ b/vendor/golang.org/x/sys/cpu/cpu_riscv64.go
@@ -7,3 +7,5 @@
package cpu
const cacheLineSize = 32
+
+func initOptions() {}
diff --git a/vendor/golang.org/x/sys/cpu/cpu_s390x.go b/vendor/golang.org/x/sys/cpu/cpu_s390x.go
new file mode 100644
index 000000000..544cd621c
--- /dev/null
+++ b/vendor/golang.org/x/sys/cpu/cpu_s390x.go
@@ -0,0 +1,30 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cpu
+
+const cacheLineSize = 256
+
+func initOptions() {
+ options = []option{
+ {Name: "zarch", Feature: &S390X.HasZARCH},
+ {Name: "stfle", Feature: &S390X.HasSTFLE},
+ {Name: "ldisp", Feature: &S390X.HasLDISP},
+ {Name: "eimm", Feature: &S390X.HasEIMM},
+ {Name: "dfp", Feature: &S390X.HasDFP},
+ {Name: "etf3eh", Feature: &S390X.HasETF3EH},
+ {Name: "msa", Feature: &S390X.HasMSA},
+ {Name: "aes", Feature: &S390X.HasAES},
+ {Name: "aescbc", Feature: &S390X.HasAESCBC},
+ {Name: "aesctr", Feature: &S390X.HasAESCTR},
+ {Name: "aesgcm", Feature: &S390X.HasAESGCM},
+ {Name: "ghash", Feature: &S390X.HasGHASH},
+ {Name: "sha1", Feature: &S390X.HasSHA1},
+ {Name: "sha256", Feature: &S390X.HasSHA256},
+ {Name: "sha3", Feature: &S390X.HasSHA3},
+ {Name: "sha512", Feature: &S390X.HasSHA512},
+ {Name: "vx", Feature: &S390X.HasVX},
+ {Name: "vxe", Feature: &S390X.HasVXE},
+ }
+}
diff --git a/vendor/golang.org/x/sys/cpu/cpu_wasm.go b/vendor/golang.org/x/sys/cpu/cpu_wasm.go
index 8681e876a..5382f2a22 100644
--- a/vendor/golang.org/x/sys/cpu/cpu_wasm.go
+++ b/vendor/golang.org/x/sys/cpu/cpu_wasm.go
@@ -11,3 +11,7 @@ package cpu
// rules are good enough.
const cacheLineSize = 0
+
+func initOptions() {}
+
+func archInit() {}
diff --git a/vendor/golang.org/x/sys/cpu/cpu_x86.go b/vendor/golang.org/x/sys/cpu/cpu_x86.go
index d70d317f5..2ad039d40 100644
--- a/vendor/golang.org/x/sys/cpu/cpu_x86.go
+++ b/vendor/golang.org/x/sys/cpu/cpu_x86.go
@@ -6,9 +6,37 @@
package cpu
+import "runtime"
+
const cacheLineSize = 64
-func init() {
+func initOptions() {
+ options = []option{
+ {Name: "adx", Feature: &X86.HasADX},
+ {Name: "aes", Feature: &X86.HasAES},
+ {Name: "avx", Feature: &X86.HasAVX},
+ {Name: "avx2", Feature: &X86.HasAVX2},
+ {Name: "bmi1", Feature: &X86.HasBMI1},
+ {Name: "bmi2", Feature: &X86.HasBMI2},
+ {Name: "erms", Feature: &X86.HasERMS},
+ {Name: "fma", Feature: &X86.HasFMA},
+ {Name: "osxsave", Feature: &X86.HasOSXSAVE},
+ {Name: "pclmulqdq", Feature: &X86.HasPCLMULQDQ},
+ {Name: "popcnt", Feature: &X86.HasPOPCNT},
+ {Name: "rdrand", Feature: &X86.HasRDRAND},
+ {Name: "rdseed", Feature: &X86.HasRDSEED},
+ {Name: "sse3", Feature: &X86.HasSSE3},
+ {Name: "sse41", Feature: &X86.HasSSE41},
+ {Name: "sse42", Feature: &X86.HasSSE42},
+ {Name: "ssse3", Feature: &X86.HasSSSE3},
+
+ // These capabilities should always be enabled on amd64:
+ {Name: "sse2", Feature: &X86.HasSSE2, Required: runtime.GOARCH == "amd64"},
+ }
+}
+
+func archInit() {
+
Initialized = true
maxID, _, _, _ := cpuid(0, 0)
@@ -52,6 +80,7 @@ func init() {
X86.HasERMS = isSet(9, ebx7)
X86.HasRDSEED = isSet(18, ebx7)
X86.HasADX = isSet(19, ebx7)
+
}
func isSet(bitpos uint, value uint32) bool {
diff --git a/vendor/golang.org/x/sys/unix/syscall_linux.go b/vendor/golang.org/x/sys/unix/syscall_linux.go
index fad483bb9..027bcafde 100644
--- a/vendor/golang.org/x/sys/unix/syscall_linux.go
+++ b/vendor/golang.org/x/sys/unix/syscall_linux.go
@@ -1965,10 +1965,15 @@ func isGroupMember(gid int) bool {
}
//sys faccessat(dirfd int, path string, mode uint32) (err error)
+//sys Faccessat2(dirfd int, path string, mode uint32, flags int) (err error)
func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) {
- if flags & ^(AT_SYMLINK_NOFOLLOW|AT_EACCESS) != 0 {
- return EINVAL
+ if flags == 0 {
+ return faccessat(dirfd, path, mode)
+ }
+
+ if err := Faccessat2(dirfd, path, mode, flags); err != ENOSYS && err != EPERM {
+ return err
}
// The Linux kernel faccessat system call does not take any flags.
@@ -1977,8 +1982,8 @@ func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) {
// Because people naturally expect syscall.Faccessat to act
// like C faccessat, we do the same.
- if flags == 0 {
- return faccessat(dirfd, path, mode)
+ if flags & ^(AT_SYMLINK_NOFOLLOW|AT_EACCESS) != 0 {
+ return EINVAL
}
var st Stat_t
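The reordering here matters: with non-zero flags, Faccessat now tries the real faccessat2(2) first (Linux >= 5.8) and only falls back to the user-space emulation when the kernel answers ENOSYS, or EPERM from an over-strict seccomp filter. A caller-side sketch (the path and mode are illustrative):

    package main

    import (
        "fmt"

        "golang.org/x/sys/unix"
    )

    func main() {
        // AT_EACCESS checks against the effective rather than the real
        // IDs, which plain access(2) cannot express; new kernels serve
        // this via SYS_FACCESSAT2, older ones hit the emulation above.
        err := unix.Faccessat(unix.AT_FDCWD, "/tmp", unix.W_OK, unix.AT_EACCESS)
        fmt.Println("writable as effective uid:", err == nil)
    }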
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux.go b/vendor/golang.org/x/sys/unix/zerrors_linux.go
index f8bd50c11..e5c4b5d28 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux.go
@@ -265,6 +265,7 @@ const (
CAP_AUDIT_READ = 0x25
CAP_AUDIT_WRITE = 0x1d
CAP_BLOCK_SUSPEND = 0x24
+ CAP_BPF = 0x27
CAP_CHOWN = 0x0
CAP_DAC_OVERRIDE = 0x1
CAP_DAC_READ_SEARCH = 0x2
@@ -273,7 +274,7 @@ const (
CAP_IPC_LOCK = 0xe
CAP_IPC_OWNER = 0xf
CAP_KILL = 0x5
- CAP_LAST_CAP = 0x25
+ CAP_LAST_CAP = 0x27
CAP_LEASE = 0x1c
CAP_LINUX_IMMUTABLE = 0x9
CAP_MAC_ADMIN = 0x21
@@ -283,6 +284,7 @@ const (
CAP_NET_BIND_SERVICE = 0xa
CAP_NET_BROADCAST = 0xb
CAP_NET_RAW = 0xd
+ CAP_PERFMON = 0x26
CAP_SETFCAP = 0x1f
CAP_SETGID = 0x6
CAP_SETPCAP = 0x8
@@ -372,6 +374,7 @@ const (
DEVLINK_GENL_NAME = "devlink"
DEVLINK_GENL_VERSION = 0x1
DEVLINK_SB_THRESHOLD_TO_ALPHA_MAX = 0x14
+ DEVMEM_MAGIC = 0x454d444d
DEVPTS_SUPER_MAGIC = 0x1cd1
DMA_BUF_MAGIC = 0x444d4142
DT_BLK = 0x6
@@ -475,6 +478,7 @@ const (
ETH_P_MOBITEX = 0x15
ETH_P_MPLS_MC = 0x8848
ETH_P_MPLS_UC = 0x8847
+ ETH_P_MRP = 0x88e3
ETH_P_MVRP = 0x88f5
ETH_P_NCSI = 0x88f8
ETH_P_NSH = 0x894f
@@ -602,8 +606,9 @@ const (
FSCRYPT_POLICY_FLAGS_PAD_4 = 0x0
FSCRYPT_POLICY_FLAGS_PAD_8 = 0x1
FSCRYPT_POLICY_FLAGS_PAD_MASK = 0x3
- FSCRYPT_POLICY_FLAGS_VALID = 0xf
+ FSCRYPT_POLICY_FLAGS_VALID = 0x1f
FSCRYPT_POLICY_FLAG_DIRECT_KEY = 0x4
+ FSCRYPT_POLICY_FLAG_IV_INO_LBLK_32 = 0x10
FSCRYPT_POLICY_FLAG_IV_INO_LBLK_64 = 0x8
FSCRYPT_POLICY_V1 = 0x0
FSCRYPT_POLICY_V2 = 0x2
@@ -632,7 +637,7 @@ const (
FS_POLICY_FLAGS_PAD_4 = 0x0
FS_POLICY_FLAGS_PAD_8 = 0x1
FS_POLICY_FLAGS_PAD_MASK = 0x3
- FS_POLICY_FLAGS_VALID = 0xf
+ FS_POLICY_FLAGS_VALID = 0x1f
FS_VERITY_FL = 0x100000
FS_VERITY_HASH_ALG_SHA256 = 0x1
FS_VERITY_HASH_ALG_SHA512 = 0x2
@@ -834,6 +839,7 @@ const (
IPPROTO_EGP = 0x8
IPPROTO_ENCAP = 0x62
IPPROTO_ESP = 0x32
+ IPPROTO_ETHERNET = 0x8f
IPPROTO_FRAGMENT = 0x2c
IPPROTO_GRE = 0x2f
IPPROTO_HOPOPTS = 0x0
@@ -847,6 +853,7 @@ const (
IPPROTO_L2TP = 0x73
IPPROTO_MH = 0x87
IPPROTO_MPLS = 0x89
+ IPPROTO_MPTCP = 0x106
IPPROTO_MTP = 0x5c
IPPROTO_NONE = 0x3b
IPPROTO_PIM = 0x67
@@ -1016,6 +1023,7 @@ const (
KEYCTL_CAPS0_PERSISTENT_KEYRINGS = 0x2
KEYCTL_CAPS0_PUBLIC_KEY = 0x8
KEYCTL_CAPS0_RESTRICT_KEYRING = 0x40
+ KEYCTL_CAPS1_NOTIFICATIONS = 0x4
KEYCTL_CAPS1_NS_KEYRING_NAME = 0x1
KEYCTL_CAPS1_NS_KEY_TAG = 0x2
KEYCTL_CHOWN = 0x4
@@ -1053,6 +1061,7 @@ const (
KEYCTL_SUPPORTS_VERIFY = 0x8
KEYCTL_UNLINK = 0x9
KEYCTL_UPDATE = 0x2
+ KEYCTL_WATCH_KEY = 0x20
KEY_REQKEY_DEFL_DEFAULT = 0x0
KEY_REQKEY_DEFL_GROUP_KEYRING = 0x6
KEY_REQKEY_DEFL_NO_CHANGE = -0x1
@@ -1096,6 +1105,8 @@ const (
LOOP_SET_FD = 0x4c00
LOOP_SET_STATUS = 0x4c02
LOOP_SET_STATUS64 = 0x4c04
+ LOOP_SET_STATUS_CLEARABLE_FLAGS = 0x4
+ LOOP_SET_STATUS_SETTABLE_FLAGS = 0xc
LO_KEY_SIZE = 0x20
LO_NAME_SIZE = 0x40
MADV_COLD = 0x14
@@ -1992,8 +2003,10 @@ const (
STATX_ATTR_APPEND = 0x20
STATX_ATTR_AUTOMOUNT = 0x1000
STATX_ATTR_COMPRESSED = 0x4
+ STATX_ATTR_DAX = 0x2000
STATX_ATTR_ENCRYPTED = 0x800
STATX_ATTR_IMMUTABLE = 0x10
+ STATX_ATTR_MOUNT_ROOT = 0x2000
STATX_ATTR_NODUMP = 0x40
STATX_ATTR_VERITY = 0x100000
STATX_BASIC_STATS = 0x7ff
@@ -2002,6 +2015,7 @@ const (
STATX_CTIME = 0x80
STATX_GID = 0x10
STATX_INO = 0x100
+ STATX_MNT_ID = 0x1000
STATX_MODE = 0x2
STATX_MTIME = 0x40
STATX_NLINK = 0x4
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go
index 8b0e024b9..84f71e99f 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go
@@ -192,6 +192,7 @@ const (
PPPIOCSRASYNCMAP = 0x40047454
PPPIOCSXASYNCMAP = 0x4020744f
PPPIOCXFERUNIT = 0x744e
+ PROT_BTI = 0x10
PR_SET_PTRACER_ANY = 0xffffffffffffffff
PTRACE_SYSEMU = 0x1f
PTRACE_SYSEMU_SINGLESTEP = 0x20
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux.go b/vendor/golang.org/x/sys/unix/zsyscall_linux.go
index f6603de4f..4eec7a795 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_linux.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_linux.go
@@ -1821,6 +1821,21 @@ func faccessat(dirfd int, path string, mode uint32) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+func Faccessat2(dirfd int, path string, mode uint32, flags int) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ _, _, e1 := Syscall6(SYS_FACCESSAT2, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func nameToHandleAt(dirFD int, pathname string, fh *fileHandle, mountID *_C_int, flags int) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(pathname)
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go
index 54559a895..a597e061c 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go
@@ -433,4 +433,5 @@ const (
SYS_CLONE3 = 435
SYS_OPENAT2 = 437
SYS_PIDFD_GETFD = 438
+ SYS_FACCESSAT2 = 439
)
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go
index 054a741b7..8c102e55a 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go
@@ -355,4 +355,5 @@ const (
SYS_CLONE3 = 435
SYS_OPENAT2 = 437
SYS_PIDFD_GETFD = 438
+ SYS_FACCESSAT2 = 439
)
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go
index 307f2ba12..98f9b68fb 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go
@@ -397,4 +397,5 @@ const (
SYS_CLONE3 = 435
SYS_OPENAT2 = 437
SYS_PIDFD_GETFD = 438
+ SYS_FACCESSAT2 = 439
)
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go
index e9404dd54..4dabc33fb 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go
@@ -300,4 +300,5 @@ const (
SYS_CLONE3 = 435
SYS_OPENAT2 = 437
SYS_PIDFD_GETFD = 438
+ SYS_FACCESSAT2 = 439
)
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go
index 68bb6d29b..d5724e596 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go
@@ -418,4 +418,5 @@ const (
SYS_CLONE3 = 4435
SYS_OPENAT2 = 4437
SYS_PIDFD_GETFD = 4438
+ SYS_FACCESSAT2 = 4439
)
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go
index 4e5251185..c1d824a4f 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go
@@ -348,4 +348,5 @@ const (
SYS_CLONE3 = 5435
SYS_OPENAT2 = 5437
SYS_PIDFD_GETFD = 5438
+ SYS_FACCESSAT2 = 5439
)
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go
index 4d9aa3003..598dd5d63 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go
@@ -348,4 +348,5 @@ const (
SYS_CLONE3 = 5435
SYS_OPENAT2 = 5437
SYS_PIDFD_GETFD = 5438
+ SYS_FACCESSAT2 = 5439
)
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go
index 64af0707d..c36782d08 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go
@@ -418,4 +418,5 @@ const (
SYS_CLONE3 = 4435
SYS_OPENAT2 = 4437
SYS_PIDFD_GETFD = 4438
+ SYS_FACCESSAT2 = 4439
)
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go
index cc3c067ba..9287538d3 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go
@@ -397,4 +397,5 @@ const (
SYS_CLONE3 = 435
SYS_OPENAT2 = 437
SYS_PIDFD_GETFD = 438
+ SYS_FACCESSAT2 = 439
)
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go
index 4050ff983..4dafad835 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go
@@ -397,4 +397,5 @@ const (
SYS_CLONE3 = 435
SYS_OPENAT2 = 437
SYS_PIDFD_GETFD = 438
+ SYS_FACCESSAT2 = 439
)
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go
index 529abb6a7..6642cfccd 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go
@@ -299,4 +299,5 @@ const (
SYS_CLONE3 = 435
SYS_OPENAT2 = 437
SYS_PIDFD_GETFD = 438
+ SYS_FACCESSAT2 = 439
)
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go
index 276650010..23367b946 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go
@@ -362,4 +362,5 @@ const (
SYS_CLONE3 = 435
SYS_OPENAT2 = 437
SYS_PIDFD_GETFD = 438
+ SYS_FACCESSAT2 = 439
)
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go
index 4dc82bb24..083aa0204 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go
@@ -376,4 +376,5 @@ const (
SYS_PIDFD_OPEN = 434
SYS_OPENAT2 = 437
SYS_PIDFD_GETFD = 438
+ SYS_FACCESSAT2 = 439
)
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux.go b/vendor/golang.org/x/sys/unix/ztypes_linux.go
index 27d67ac8f..a8c470880 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux.go
@@ -67,7 +67,9 @@ type Statx_t struct {
Rdev_minor uint32
Dev_major uint32
Dev_minor uint32
- _ [14]uint64
+ Mnt_id uint64
+ _ uint64
+ _ [12]uint64
}
type Fsid struct {
@@ -671,6 +673,8 @@ type InotifyEvent struct {
const SizeofInotifyEvent = 0x10
+const SI_LOAD_SHIFT = 0x10
+
type Utsname struct {
Sysname [65]byte
Nodename [65]byte
@@ -1912,6 +1916,10 @@ const (
BPF_MAP_DELETE_BATCH = 0x1b
BPF_LINK_CREATE = 0x1c
BPF_LINK_UPDATE = 0x1d
+ BPF_LINK_GET_FD_BY_ID = 0x1e
+ BPF_LINK_GET_NEXT_ID = 0x1f
+ BPF_ENABLE_STATS = 0x20
+ BPF_ITER_CREATE = 0x21
BPF_MAP_TYPE_UNSPEC = 0x0
BPF_MAP_TYPE_HASH = 0x1
BPF_MAP_TYPE_ARRAY = 0x2
@@ -1939,6 +1947,7 @@ const (
BPF_MAP_TYPE_SK_STORAGE = 0x18
BPF_MAP_TYPE_DEVMAP_HASH = 0x19
BPF_MAP_TYPE_STRUCT_OPS = 0x1a
+ BPF_MAP_TYPE_RINGBUF = 0x1b
BPF_PROG_TYPE_UNSPEC = 0x0
BPF_PROG_TYPE_SOCKET_FILTER = 0x1
BPF_PROG_TYPE_KPROBE = 0x2
@@ -1997,6 +2006,18 @@ const (
BPF_TRACE_FEXIT = 0x19
BPF_MODIFY_RETURN = 0x1a
BPF_LSM_MAC = 0x1b
+ BPF_TRACE_ITER = 0x1c
+ BPF_CGROUP_INET4_GETPEERNAME = 0x1d
+ BPF_CGROUP_INET6_GETPEERNAME = 0x1e
+ BPF_CGROUP_INET4_GETSOCKNAME = 0x1f
+ BPF_CGROUP_INET6_GETSOCKNAME = 0x20
+ BPF_XDP_DEVMAP = 0x21
+ BPF_LINK_TYPE_UNSPEC = 0x0
+ BPF_LINK_TYPE_RAW_TRACEPOINT = 0x1
+ BPF_LINK_TYPE_TRACING = 0x2
+ BPF_LINK_TYPE_CGROUP = 0x3
+ BPF_LINK_TYPE_ITER = 0x4
+ BPF_LINK_TYPE_NETNS = 0x5
BPF_ANY = 0x0
BPF_NOEXIST = 0x1
BPF_EXIST = 0x2
@@ -2012,6 +2033,7 @@ const (
BPF_F_WRONLY_PROG = 0x100
BPF_F_CLONE = 0x200
BPF_F_MMAPABLE = 0x400
+ BPF_STATS_RUN_TIME = 0x0
BPF_STACK_BUILD_ID_EMPTY = 0x0
BPF_STACK_BUILD_ID_VALID = 0x1
BPF_STACK_BUILD_ID_IP = 0x2
@@ -2035,16 +2057,30 @@ const (
BPF_F_CURRENT_CPU = 0xffffffff
BPF_F_CTXLEN_MASK = 0xfffff00000000
BPF_F_CURRENT_NETNS = -0x1
+ BPF_CSUM_LEVEL_QUERY = 0x0
+ BPF_CSUM_LEVEL_INC = 0x1
+ BPF_CSUM_LEVEL_DEC = 0x2
+ BPF_CSUM_LEVEL_RESET = 0x3
BPF_F_ADJ_ROOM_FIXED_GSO = 0x1
BPF_F_ADJ_ROOM_ENCAP_L3_IPV4 = 0x2
BPF_F_ADJ_ROOM_ENCAP_L3_IPV6 = 0x4
BPF_F_ADJ_ROOM_ENCAP_L4_GRE = 0x8
BPF_F_ADJ_ROOM_ENCAP_L4_UDP = 0x10
+ BPF_F_ADJ_ROOM_NO_CSUM_RESET = 0x20
BPF_ADJ_ROOM_ENCAP_L2_MASK = 0xff
BPF_ADJ_ROOM_ENCAP_L2_SHIFT = 0x38
BPF_F_SYSCTL_BASE_NAME = 0x1
BPF_SK_STORAGE_GET_F_CREATE = 0x1
BPF_F_GET_BRANCH_RECORDS_SIZE = 0x1
+ BPF_RB_NO_WAKEUP = 0x1
+ BPF_RB_FORCE_WAKEUP = 0x2
+ BPF_RB_AVAIL_DATA = 0x0
+ BPF_RB_RING_SIZE = 0x1
+ BPF_RB_CONS_POS = 0x2
+ BPF_RB_PROD_POS = 0x3
+ BPF_RINGBUF_BUSY_BIT = 0x80000000
+ BPF_RINGBUF_DISCARD_BIT = 0x40000000
+ BPF_RINGBUF_HDR_SZ = 0x8
BPF_ADJ_ROOM_NET = 0x0
BPF_ADJ_ROOM_MAC = 0x1
BPF_HDR_START_MAC = 0x0
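The Statx_t change at the top of this file splits the anonymous padding so the new Mnt_id field lines up with the kernel's struct statx; paired with the STATX_MNT_ID mask bit added in zerrors_linux.go, the unique mount ID becomes reachable without parsing /proc/self/mountinfo. A sketch (Linux >= 5.8; the target path is illustrative):

    package main

    import (
        "fmt"

        "golang.org/x/sys/unix"
    )

    func main() {
        var stx unix.Statx_t
        // Older kernels simply leave STATX_MNT_ID unset in stx.Mask.
        err := unix.Statx(unix.AT_FDCWD, "/", 0,
            unix.STATX_BASIC_STATS|unix.STATX_MNT_ID, &stx)
        if err != nil {
            panic(err)
        }
        if stx.Mask&unix.STATX_MNT_ID != 0 {
            fmt.Println("mount id:", stx.Mnt_id)
        }
    }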
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 77cfa9c3f..7af0f1110 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -84,7 +84,7 @@ github.com/containers/buildah/pkg/secrets
github.com/containers/buildah/pkg/supplemented
github.com/containers/buildah/pkg/umask
github.com/containers/buildah/util
-# github.com/containers/common v0.20.3-0.20200827091701-a550d6a98aa3
+# github.com/containers/common v0.21.0
github.com/containers/common/pkg/apparmor
github.com/containers/common/pkg/apparmor/internal/supported
github.com/containers/common/pkg/auth
@@ -97,13 +97,14 @@ github.com/containers/common/pkg/sysinfo
github.com/containers/common/version
# github.com/containers/conmon v2.0.20+incompatible
github.com/containers/conmon/runner/config
-# github.com/containers/image/v5 v5.5.2
+# github.com/containers/image/v5 v5.5.2 => github.com/containers/image/v5 v5.5.2-0.20200902171422-1c313b2d23e0
github.com/containers/image/v5/copy
github.com/containers/image/v5/directory
github.com/containers/image/v5/directory/explicitfilepath
github.com/containers/image/v5/docker
github.com/containers/image/v5/docker/archive
github.com/containers/image/v5/docker/daemon
+github.com/containers/image/v5/docker/internal/tarfile
github.com/containers/image/v5/docker/policyconfiguration
github.com/containers/image/v5/docker/reference
github.com/containers/image/v5/docker/tarfile
@@ -158,7 +159,7 @@ github.com/containers/psgo/internal/dev
github.com/containers/psgo/internal/host
github.com/containers/psgo/internal/proc
github.com/containers/psgo/internal/process
-# github.com/containers/storage v1.23.2
+# github.com/containers/storage v1.23.5
github.com/containers/storage
github.com/containers/storage/drivers
github.com/containers/storage/drivers/aufs
@@ -305,7 +306,7 @@ github.com/google/gofuzz
github.com/google/shlex
# github.com/google/uuid v1.1.2
github.com/google/uuid
-# github.com/gorilla/mux v1.7.4
+# github.com/gorilla/mux v1.8.0
github.com/gorilla/mux
# github.com/gorilla/schema v1.2.0
github.com/gorilla/schema
@@ -321,7 +322,7 @@ github.com/hpcloud/tail/ratelimiter
github.com/hpcloud/tail/util
github.com/hpcloud/tail/watch
github.com/hpcloud/tail/winfile
-# github.com/imdario/mergo v0.3.9
+# github.com/imdario/mergo v0.3.11
github.com/imdario/mergo
# github.com/inconshreveable/mousetrap v1.0.0
github.com/inconshreveable/mousetrap
@@ -329,19 +330,17 @@ github.com/inconshreveable/mousetrap
github.com/ishidawataru/sctp
# github.com/json-iterator/go v1.1.10
github.com/json-iterator/go
-# github.com/klauspost/compress v1.10.11
+# github.com/klauspost/compress v1.11.0
github.com/klauspost/compress/flate
github.com/klauspost/compress/fse
github.com/klauspost/compress/huff0
github.com/klauspost/compress/snappy
github.com/klauspost/compress/zstd
github.com/klauspost/compress/zstd/internal/xxhash
-# github.com/klauspost/pgzip v1.2.4
+# github.com/klauspost/pgzip v1.2.5
github.com/klauspost/pgzip
# github.com/konsorten/go-windows-terminal-sequences v1.0.3
github.com/konsorten/go-windows-terminal-sequences
-# github.com/mattn/go-isatty v0.0.12
-github.com/mattn/go-isatty
# github.com/mattn/go-runewidth v0.0.9
github.com/mattn/go-runewidth
# github.com/mattn/go-shellwords v1.0.10
@@ -350,6 +349,8 @@ github.com/mattn/go-shellwords
github.com/matttproud/golang_protobuf_extensions/pbutil
# github.com/mistifyio/go-zfs v2.1.1+incompatible
github.com/mistifyio/go-zfs
+# github.com/moby/sys/mountinfo v0.1.3
+github.com/moby/sys/mountinfo
# github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd
github.com/modern-go/concurrent
# github.com/modern-go/reflect2 v1.0.1
@@ -366,7 +367,7 @@ github.com/nxadm/tail/ratelimiter
github.com/nxadm/tail/util
github.com/nxadm/tail/watch
github.com/nxadm/tail/winfile
-# github.com/onsi/ginkgo v1.14.0
+# github.com/onsi/ginkgo v1.14.1
github.com/onsi/ginkgo
github.com/onsi/ginkgo/config
github.com/onsi/ginkgo/extensions/table
@@ -394,7 +395,7 @@ github.com/onsi/ginkgo/reporters/stenographer
github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable
github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty
github.com/onsi/ginkgo/types
-# github.com/onsi/gomega v1.10.1
+# github.com/onsi/gomega v1.10.2
github.com/onsi/gomega
github.com/onsi/gomega/format
github.com/onsi/gomega/gbytes
@@ -524,7 +525,7 @@ github.com/uber/jaeger-client-go/transport
github.com/uber/jaeger-client-go/utils
# github.com/uber/jaeger-lib v2.2.0+incompatible
github.com/uber/jaeger-lib/metrics
-# github.com/ulikunitz/xz v0.5.7
+# github.com/ulikunitz/xz v0.5.8
github.com/ulikunitz/xz
github.com/ulikunitz/xz/internal/hash
github.com/ulikunitz/xz/internal/xlog
@@ -537,7 +538,7 @@ github.com/varlink/go/varlink/idl
github.com/vbatts/tar-split/archive/tar
github.com/vbatts/tar-split/tar/asm
github.com/vbatts/tar-split/tar/storage
-# github.com/vbauerster/mpb/v5 v5.2.2
+# github.com/vbauerster/mpb/v5 v5.3.0
github.com/vbauerster/mpb/v5
github.com/vbauerster/mpb/v5/cwriter
github.com/vbauerster/mpb/v5/decor
@@ -605,7 +606,7 @@ golang.org/x/oauth2/internal
# golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a
golang.org/x/sync/errgroup
golang.org/x/sync/semaphore
-# golang.org/x/sys v0.0.0-20200728102440-3e129f6d46b1
+# golang.org/x/sys v0.0.0-20200810151505-1b9f1253b3ed
golang.org/x/sys/cpu
golang.org/x/sys/internal/unsafeheader
golang.org/x/sys/unix
@@ -700,7 +701,7 @@ gopkg.in/yaml.v3
# k8s.io/api v0.0.0-20190620084959-7cf5895f2711
k8s.io/api/apps/v1
k8s.io/api/core/v1
-# k8s.io/apimachinery v0.19.0
+# k8s.io/apimachinery v0.19.1
k8s.io/apimachinery/pkg/api/errors
k8s.io/apimachinery/pkg/api/resource
k8s.io/apimachinery/pkg/apis/meta/v1